tests and ci/cd #13

Open · wants to merge 5 commits into main
128 changes: 128 additions & 0 deletions .github/workflows/ci-cd.yaml
@@ -0,0 +1,128 @@
name: replicate-ci-cd

on:
  pull_request:
    branches:
      - main
  push:
    branches:
      - main
  workflow_dispatch:

jobs:
  lint:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.12'

      - name: Install dependencies
        run: |
          pip install ruff

      - name: Run ruff linter
        run: |
          ruff check

      - name: Run ruff formatter
        run: |
          ruff format --diff
  build-and-push:
    runs-on: ubuntu-latest
    if: ${{ !contains(github.event.head_commit.message, '[skip-cd]') }}
    strategy:
      matrix:
        include:
          - event: pull_request
            model: ['dev', 'schnell']
            env: 'test'
          - event: push
            model: ['dev', 'schnell']
            env: 'prod'
          - event: workflow_dispatch
            model: ['dev', 'schnell']
            env: 'prod'
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Determine changes
        id: what-changed
        run: |
          if [[ "${{ github.event_name }}" == "pull_request" ]]; then
            FILES_CHANGED=$(git diff --name-only --diff-filter=AMR origin/${{ github.base_ref }} HEAD)
          else
            FILES_CHANGED=$(git diff --name-only --diff-filter=AMR HEAD~1 HEAD)
          fi
          # the file list can span multiple lines, so write it with the delimiter syntax
          {
            echo "FILES_CHANGED<<EOF"
            echo "$FILES_CHANGED"
            echo "EOF"
          } >> $GITHUB_ENV
          if echo "$FILES_CHANGED" | grep -q 'cog.yaml' || ${{ contains(github.event.head_commit.message, '[cog build]') }}; then
            echo "cog-push=true" >> $GITHUB_OUTPUT
          else
            echo "cog-push=false" >> $GITHUB_OUTPUT
          fi

      - name: Setup Cog
        if: steps.what-changed.outputs.cog-push == 'true'
        uses: replicate/setup-cog@v2
        with:
          token: ${{ secrets.REPLICATE_API_TOKEN }}
          install-cuda: false
          cog-version: "v0.9.20"

      - name: Free disk space
        if: steps.what-changed.outputs.cog-push == 'true'
        uses: jlumbroso/free-disk-space@main
        with:
          tool-cache: false
          android: true
          dotnet: true
          haskell: true
          large-packages: true
          docker-images: false
          swap-storage: true

      - name: Cog build and push
        if: steps.what-changed.outputs.cog-push == 'true'
        run: |
          ./script/push.sh ${{ matrix.model }} ${{ matrix.env }}

      - name: Install Yolo
        if: steps.what-changed.outputs.cog-push == 'false'
        run: |
          sudo curl -o /usr/local/bin/yolo -L "https://github.com/replicate/yolo/releases/latest/download/yolo_$(uname -s)_$(uname -m)"
          sudo chmod +x /usr/local/bin/yolo

      - name: Yolo push
        if: steps.what-changed.outputs.cog-push == 'false'
        env:
          REPLICATE_API_TOKEN: ${{ secrets.REPLICATE_API_TOKEN }}
        run: |
          echo "pushing changes to ${{ matrix.model }}"
          echo "changed files: $FILES_CHANGED"
          yolo push --base ${{ matrix.model }} --dest ${{ matrix.model }} $FILES_CHANGED

      - name: Setup Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.10'

      - name: Install Python test dependencies
        run: |
          pip install -r requirements_test.txt

      - name: Test model
        env:
          REPLICATE_API_TOKEN: ${{ secrets.REPLICATE_API_TOKEN }}
          MODEL: ${{ matrix.model }}
          TEST_ENV: ${{ matrix.env }}
        run: |
          ./script/test.sh ${{ matrix.model }} ${{ matrix.env }}
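Note: the final step delegates to ./script/test.sh, which is not part of this diff. As a rough sketch only, a smoke test driven by the MODEL and TEST_ENV variables exported above could use the official replicate Python client along these lines; the slug fallback, prompt, and assertion are illustrative assumptions, not the script's actual contents.

# smoke_test.py -- hypothetical sketch; ./script/test.sh is not shown in this PR.
import os

import replicate  # reads REPLICATE_API_TOKEN from the environment

model = os.environ["MODEL"]        # "dev" or "schnell", from the job matrix
test_env = os.environ["TEST_ENV"]  # "test" or "prod", from the job matrix

# Assumed fallback: hit the public Flux models when no explicit slug is provided.
slug = os.environ.get("MODEL_SLUG", f"black-forest-labs/flux-{model}")

output = replicate.run(slug, input={"prompt": "smoke test: an astronaut riding a horse"})
assert output, f"{slug} ({test_env}) returned no output"
print(f"{slug} ({test_env}) OK:", output)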
8 changes: 6 additions & 2 deletions flux/modules/conditioner.py
@@ -10,10 +10,14 @@ def __init__(self, version: str, max_length: int, is_clip=False, **hf_kwargs):
         self.output_key = "pooler_output" if self.is_clip else "last_hidden_state"

         if self.is_clip:
-            self.tokenizer: CLIPTokenizer = CLIPTokenizer.from_pretrained(version + "/tokenizer", max_length=max_length)
+            self.tokenizer: CLIPTokenizer = CLIPTokenizer.from_pretrained(
+                version + "/tokenizer", max_length=max_length
+            )
             self.hf_module: CLIPTextModel = CLIPTextModel.from_pretrained(version + "/model", **hf_kwargs)
         else:
-            self.tokenizer: T5Tokenizer = T5Tokenizer.from_pretrained(version + "/tokenizer", max_length=max_length)
+            self.tokenizer: T5Tokenizer = T5Tokenizer.from_pretrained(
+                version + "/tokenizer", max_length=max_length
+            )
             self.hf_module: T5EncoderModel = T5EncoderModel.from_pretrained(version + "/model", **hf_kwargs)

         self.hf_module = self.hf_module.eval().requires_grad_(False)
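For context on what the reflowed calls above load: the CLIP branch pairs a CLIPTokenizer with a CLIPTextModel and HFEmbedder later reads pooler_output, while the T5 branch reads last_hidden_state. A minimal standalone sketch of the CLIP path, using the public openai/clip-vit-large-patch14 checkpoint as a stand-in for the local version + "/tokenizer" and version + "/model" paths:

# Standalone sketch, not part of the diff; checkpoint id and prompt are stand-ins.
import torch
from transformers import CLIPTextModel, CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14", max_length=77)
encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14").eval().requires_grad_(False)

with torch.no_grad():
    batch = tokenizer(
        ["a photo of a forest"],
        truncation=True,
        max_length=77,
        padding="max_length",
        return_tensors="pt",
    )
    out = encoder(input_ids=batch["input_ids"])
    pooled = out["pooler_output"]  # what HFEmbedder returns when is_clip=True
    print(pooled.shape)  # torch.Size([1, 768]) for clip-vit-large-patch14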
24 changes: 9 additions & 15 deletions flux/sampling.py
@@ -104,7 +104,7 @@ def denoise_single_item(
     vec: Tensor,
     timesteps: list[float],
     guidance: float = 4.0,
-    compile_run: bool = False
+    compile_run: bool = False,
 ):
     img = img.unsqueeze(0)
     img_ids = img_ids.unsqueeze(0)
@@ -113,14 +113,14 @@ def denoise_single_item(
     vec = vec.unsqueeze(0)
     guidance_vec = torch.full((1,), guidance, device=img.device, dtype=img.dtype)

-    if compile_run:
-        torch._dynamo.mark_dynamic(img, 1, min=256, max=8100) # needs at least torch 2.4
+    if compile_run:
+        torch._dynamo.mark_dynamic(img, 1, min=256, max=8100)  # needs at least torch 2.4
         torch._dynamo.mark_dynamic(img_ids, 1, min=256, max=8100)
         model = torch.compile(model)

     for t_curr, t_prev in tqdm(zip(timesteps[:-1], timesteps[1:])):
         t_vec = torch.full((1,), t_curr, dtype=img.dtype, device=img.device)

         pred = model(
             img=img,
             img_ids=img_ids,
@@ -135,6 +135,7 @@ def denoise_single_item(

     return img, model

+
 def denoise(
     model: Flux,
     # model input
@@ -146,28 +147,21 @@ def denoise(
     # sampling parameters
     timesteps: list[float],
     guidance: float = 4.0,
-    compile_run: bool = False
+    compile_run: bool = False,
 ):
     batch_size = img.shape[0]
     output_imgs = []

     for i in range(batch_size):
         denoised_img, model = denoise_single_item(
-            model,
-            img[i],
-            img_ids[i],
-            txt[i],
-            txt_ids[i],
-            vec[i],
-            timesteps,
-            guidance,
-            compile_run
+            model, img[i], img_ids[i], txt[i], txt_ids[i], vec[i], timesteps, guidance, compile_run
         )
         compile_run = False
         output_imgs.append(denoised_img)

     return torch.cat(output_imgs), model

+
 def unpack(x: Tensor, height: int, width: int) -> Tensor:
     return rearrange(
         x,
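The reshaped loop above compiles the model only for the first batch item (compile_run is set to False afterwards) and marks the token dimension dynamic so the compiled graph is reused for items with different sequence lengths. A self-contained toy sketch of that pattern, assuming torch >= 2.4 for the min/max arguments to mark_dynamic:

# Toy illustration of the compile-once-and-reuse pattern in denoise();
# the Linear module and shapes are placeholders, not the Flux model.
import torch
import torch.nn as nn

model = nn.Linear(64, 64)
compile_run = True
outputs = []

for i in range(3):
    x = torch.randn(1, 256 + 128 * i, 64)  # per-item sequence length varies
    if compile_run:
        # hint the compiler that dim 1 is dynamic so one graph serves all lengths
        torch._dynamo.mark_dynamic(x, 1, min=256, max=8100)
        model = torch.compile(model)
        compile_run = False  # later items reuse the already-compiled module
    outputs.append(model(x))

print([tuple(o.shape) for o in outputs])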
3 changes: 2 additions & 1 deletion flux/util.py
@@ -23,6 +23,7 @@ class ModelSpec:
     ae_path: str | None
     ae_url: str | None

+
 T5_URL = "https://weights.replicate.delivery/default/official-models/flux/t5/t5-v1_1-xxl.tar"
 T5_CACHE = "./model-cache/t5"
 CLIP_URL = "https://weights.replicate.delivery/default/official-models/flux/clip/clip-vit-large-patch14.tar"
@@ -191,4 +192,4 @@ def download_weights(url: str, dest: str):
         subprocess.check_call(["pget", "-x", url, dest], close_fds=False)
     else:
         subprocess.check_call(["pget", url, dest], close_fds=False)
-    print("downloading took: ", time.time() - start)
\ No newline at end of file
+    print("downloading took: ", time.time() - start)