Merge branch 'main' into schoi/mistral
schoi-habana committed Jul 25, 2024
2 parents 11e9c1b + 7814fe4 commit cd8932d
Showing 119 changed files with 11,242 additions and 786 deletions.
6 changes: 4 additions & 2 deletions .github/workflows/fast_tests.yml
@@ -15,7 +15,8 @@ concurrency:
jobs:
transformers:
name: Run tests for optimum.habana.transformers
runs-on: [self-hosted, linux, x64, gaudi-habana] # run the job on the newly created runner
runs-on:
group: aws-dl1-24xlarge
steps:
- name: Checkout
uses: actions/checkout@v2
@@ -39,7 +40,8 @@ jobs:
name: Run tests for optimum.habana.diffusers
needs:
- transformers # required to wait for the previous tests to finish
runs-on: [self-hosted, linux, x64, gaudi-habana] # run the job on the newly created runner
runs-on:
group: aws-dl1-24xlarge
steps:
- name: Checkout
uses: actions/checkout@v2
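Both fast-test jobs move from a list of self-hosted runner labels to a GitHub Actions runner group. A minimal sketch of the migrated job shape, using the names from the hunk above (the rest of the real workflow is assumed unchanged):

```yaml
# Sketch of the new runner selection; only the runner group, job name and
# checkout step are taken from the diff above, the rest is assumed unchanged.
jobs:
  transformers:
    name: Run tests for optimum.habana.transformers
    runs-on:
      group: aws-dl1-24xlarge   # was: [self-hosted, linux, x64, gaudi-habana]
    steps:
      - name: Checkout
        uses: actions/checkout@v2
```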
27 changes: 18 additions & 9 deletions .github/workflows/slow_tests.yml
@@ -12,7 +12,8 @@ concurrency:
jobs:
example-diff:
name: Test examples differences
runs-on: [self-hosted, linux, x64, gaudi-habana] # run the job on the newly created runner
runs-on:
group: aws-dl1-24xlarge
steps:
- name: Checkout
uses: actions/checkout@v2
@@ -37,7 +38,8 @@ jobs:
if: ${{ !cancelled() && (success() || failure()) }}
needs:
- example-diff # run the job when the previous test job is done
runs-on: [self-hosted, linux, x64, gaudi-habana] # run the job on the newly created runner
runs-on:
group: aws-dl1-24xlarge
steps:
- name: Checkout
uses: actions/checkout@v2
@@ -63,7 +65,8 @@ jobs:
needs:
- example-diff
- stable-diffusion # run the job when the previous test job is done
runs-on: [self-hosted, linux, x64, gaudi-habana] # run the job on the newly created runner
runs-on:
group: aws-dl1-24xlarge
steps:
- name: Checkout
uses: actions/checkout@v2
@@ -89,7 +92,8 @@ jobs:
needs:
- example-diff
- deepspeed # run the job when the previous test job is done
runs-on: [self-hosted, linux, x64, gaudi-habana] # run the job on the newly created runner
runs-on:
group: aws-dl1-24xlarge
steps:
- name: Checkout
uses: actions/checkout@v2
@@ -116,7 +120,8 @@ jobs:
- example-diff
- deepspeed
- multi-card # run the job when the previous test jobs are done
runs-on: [self-hosted, linux, x64, gaudi-habana] # run the job on the newly created runner
runs-on:
group: aws-dl1-24xlarge
steps:
- name: Checkout
uses: actions/checkout@v2
@@ -144,7 +149,8 @@ jobs:
- deepspeed
- multi-card
- single-card # run the job when the previous test jobs are done
runs-on: [self-hosted, linux, x64, gaudi-habana] # run the job on the newly created runner
runs-on:
group: aws-dl1-24xlarge
steps:
- name: Checkout
if: github.event.schedule == '0 21 * * 6'
@@ -179,7 +185,8 @@ jobs:
- multi-card
- single-card
- albert-xxl-single-card # run the job when the previous test jobs are done
runs-on: [self-hosted, linux, x64, gaudi-habana] # run the job on the newly created runner
runs-on:
group: aws-dl1-24xlarge
steps:
- name: Checkout
uses: actions/checkout@v2
@@ -209,7 +216,8 @@ jobs:
- single-card
- albert-xxl-single-card
- text-generation # run the job when the previous test jobs are done
runs-on: [self-hosted, linux, x64, gaudi-habana] # run the job on the newly created runner
runs-on:
group: aws-dl1-24xlarge
steps:
- name: Checkout
uses: actions/checkout@v2
@@ -240,7 +248,8 @@ jobs:
- albert-xxl-single-card
- text-generation
- trl # run the job when the previous test jobs are done
runs-on: [self-hosted, linux, x64, gaudi-habana] # run the job on the newly created runner
runs-on:
group: aws-dl1-24xlarge
steps:
- name: Checkout Optimum Habana
uses: actions/checkout@v2
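The same migration is applied to every job in slow_tests.yml. Each job keeps its sequential `needs` chain and, where present, an `if` guard so it still runs when an earlier job failed but not when the run was cancelled. A condensed sketch of that pattern, assuming the second job id is `stable-diffusion` (inferred from the `needs` entries later in the file):

```yaml
# Condensed sketch of two chained jobs; the stable-diffusion job id is an assumption.
jobs:
  example-diff:
    name: Test examples differences
    runs-on:
      group: aws-dl1-24xlarge
    steps:
      - name: Checkout
        uses: actions/checkout@v2

  stable-diffusion:
    if: ${{ !cancelled() && (success() || failure()) }}   # run even if example-diff failed
    needs:
      - example-diff
    runs-on:
      group: aws-dl1-24xlarge
    steps:
      - name: Checkout
        uses: actions/checkout@v2
```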
29 changes: 29 additions & 0 deletions Makefile
@@ -41,9 +41,32 @@ fast_tests_diffusers:
python -m pip install .[tests]
python -m pytest tests/test_diffusers.py

# Run single-card non-regression tests on image classification models
fast_tests_image_classifications:
pip install timm
python -m pip install .[tests]
python -m pytest tests/test_image_classification.py

# Run unit and integration tests related to Image segmentation
fast_tests_image_segmentation:
python -m pip install .[tests]
python -m pytest tests/test_image_segmentation.py

# Run unit and integration tests related to text feature extraction
fast_tests_feature_extraction:
python -m pip install .[tests]
python -m pytest tests/test_feature_extraction.py

# Run unit and integration tests related to VideoMAE
fast_test_videomae:
python -m pip install .[tests]
python -m pytest tests/test_video_mae.py

# Run single-card non-regression tests
slow_tests_1x: test_installs
python -m pytest tests/test_examples.py -v -s -k "single_card"
python -m pip install peft==0.10.0
python -m pytest tests/test_peft_inference.py
python -m pytest tests/test_pipeline.py

# Run multi-card non-regression tests
@@ -61,6 +84,7 @@ slow_tests_diffusers: test_installs
python -m pip install peft==0.7.0
python -m pytest tests/test_diffusers.py -v -s -k "test_train_text_to_image_"
python -m pytest tests/test_diffusers.py -v -s -k "test_train_controlnet"
python -m pytest tests/test_diffusers.py -v -s -k "test_deterministic_image_generation"

# Run text-generation non-regression tests
slow_tests_text_generation_example: test_installs
@@ -71,6 +95,11 @@ slow_tests_text_generation_example: test_installs
slow_tests_image_to_text_example: test_installs
python -m pytest tests/test_image_to_text_example.py -v -s --token $(TOKEN)

# Run visual question answering tests
slow_tests_openclip_vqa_example: test_installs
python -m pip install -r examples/visual-question-answering/openclip_requirements.txt
python -m pytest tests/test_openclip_vqa.py

slow_tests_fsdp: test_installs
python -m pytest tests/test_fsdp_examples.py -v -s --token $(TOKEN)

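The new Makefile targets are plain `make` invocations, so they can be wired into CI like the existing ones. A hypothetical job calling the new visual-question-answering target (the job id and step name are illustrative; only the target name and runner group come from this diff):

```yaml
# Hypothetical CI job; only `slow_tests_openclip_vqa_example` and the runner
# group are taken from the diff, the job id and step name are made up.
openclip-vqa:
  runs-on:
    group: aws-dl1-24xlarge
  steps:
    - name: Checkout
      uses: actions/checkout@v2
    - name: Run visual question answering tests
      run: make slow_tests_openclip_vqa_example
```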
22 changes: 17 additions & 5 deletions README.md
@@ -59,9 +59,9 @@ The `--upgrade-strategy eager` option is needed to ensure `optimum-habana` is up
To use the example associated with the latest stable release, run:
> ```
> git clone https://github.com/huggingface/optimum-habana
> cd optimum-habana && git checkout v1.12.0
> cd optimum-habana && git checkout v1.12.1
> ```
> with `v1.12.0` the version number of this release.
> with `v1.12.1` the version number of this release.
### Option 2: Use the latest main branch under development
@@ -179,7 +179,7 @@ The following model architectures, tasks and device distributions have been vali
| Architecture | Training | Inference | <center>Tasks</center> |
|--------------|:--------:|:---------:|:-----------------------|
| BERT | :heavy_check_mark: | :heavy_check_mark: | <li>[text classification](https://github.com/huggingface/optimum-habana/tree/main/examples/text-classification)</li><li>[question answering](https://github.com/huggingface/optimum-habana/tree/main/examples/question-answering)</li><li>[language modeling](https://github.com/huggingface/optimum-habana/tree/main/examples/language-modeling)</li> |
| BERT | :heavy_check_mark: | :heavy_check_mark: | <li>[text classification](https://github.com/huggingface/optimum-habana/tree/main/examples/text-classification)</li><li>[question answering](https://github.com/huggingface/optimum-habana/tree/main/examples/question-answering)</li><li>[language modeling](https://github.com/huggingface/optimum-habana/tree/main/examples/language-modeling)</li><li>[text feature extraction](https://github.com/huggingface/optimum-habana/tree/main/examples/text-feature-extraction)</li> |
| RoBERTa | :heavy_check_mark: | :heavy_check_mark: | <li>[question answering](https://github.com/huggingface/optimum-habana/tree/main/examples/question-answering)</li><li>[language modeling](https://github.com/huggingface/optimum-habana/tree/main/examples/language-modeling)</li> |
| ALBERT | :heavy_check_mark: | :heavy_check_mark: | <li>[question answering](https://github.com/huggingface/optimum-habana/tree/main/examples/question-answering)</li><li>[language modeling](https://github.com/huggingface/optimum-habana/tree/main/examples/language-modeling)</li> |
| DistilBERT |:heavy_check_mark: | :heavy_check_mark: | <li>[question answering](https://github.com/huggingface/optimum-habana/tree/main/examples/question-answering)</li><li>[language modeling](https://github.com/huggingface/optimum-habana/tree/main/examples/language-modeling)</li> |
@@ -189,7 +189,7 @@ The following model architectures, tasks and device distributions have been vali
| GPT-J | <div style="text-align:left"><li>DeepSpeed</li></div> | <div style="text-align:left"><li>Single card</li><li>DeepSpeed</li></div> | <li>[language modeling](https://github.com/huggingface/optimum-habana/tree/main/examples/language-modeling)</li><li>[text generation](https://github.com/huggingface/optimum-habana/tree/main/examples/text-generation)</li> |
| GPT-NeoX | <div style="text-align:left"><li>DeepSpeed</li></div> | <div style="text-align:left"><li>DeepSpeed</li></div> | <li>[language modeling](https://github.com/huggingface/optimum-habana/tree/main/examples/language-modeling)</li><li>[text generation](https://github.com/huggingface/optimum-habana/tree/main/examples/text-generation)</li> |
| OPT | | <div style="text-align:left"><li>DeepSpeed</li></div> | <li>[text generation](https://github.com/huggingface/optimum-habana/tree/main/examples/text-generation)</li> |
| Llama 2 / CodeLlama / Llama 3 / Llama Guard | :heavy_check_mark: | :heavy_check_mark: | <li>[language modeling](https://github.com/huggingface/optimum-habana/tree/main/examples/language-modeling)</li><li>[text generation](https://github.com/huggingface/optimum-habana/tree/main/examples/text-generation)</li><li>[question answering](https://github.com/huggingface/optimum-habana/tree/main/examples/question-answering)</li><li>[text classification](https://github.com/huggingface/optimum-habana/tree/main/examples/text-classification) (Llama Guard)</li> |
| Llama 2 / CodeLlama / Llama 3 / Llama Guard / Granite | :heavy_check_mark: | :heavy_check_mark: | <li>[language modeling](https://github.com/huggingface/optimum-habana/tree/main/examples/language-modeling)</li><li>[text generation](https://github.com/huggingface/optimum-habana/tree/main/examples/text-generation)</li><li>[question answering](https://github.com/huggingface/optimum-habana/tree/main/examples/question-answering)</li><li>[text classification](https://github.com/huggingface/optimum-habana/tree/main/examples/text-classification) (Llama Guard)</li> |
| StableLM | | <div style="text-align:left"><li>Single card</li></div> | <li>[text generation](https://github.com/huggingface/optimum-habana/tree/main/examples/text-generation)</li> |
| Falcon | <div style="text-align:left"><li>LoRA</li></div> | :heavy_check_mark: | <li>[language modeling](https://github.com/huggingface/optimum-habana/tree/main/examples/language-modeling)</li><li>[text generation](https://github.com/huggingface/optimum-habana/tree/main/examples/text-generation)</li> |
| CodeGen | | <div style="text-align:left"><li>Single card</li></div> | <li>[text generation](https://github.com/huggingface/optimum-habana/tree/main/examples/text-generation)</li> |
@@ -214,6 +214,8 @@ The following model architectures, tasks and device distributions have been vali
| OWLViT | | <div style="text-align:left"><li>Single card</li></div> | <li>[zero shot object detection](https://github.com/huggingface/optimum-habana/tree/main/examples/zero-shot-object-detection)</li> |
| ClipSeg | | <div style="text-align:left"><li>Single card</li></div> | <li>[object segmentation](https://github.com/huggingface/optimum-habana/tree/main/examples/object-segementation)</li> |
| Llava / Llava-next | | <div style="text-align:left"><li>Single card</li></div> | <li>[image to text](https://github.com/huggingface/optimum-habana/tree/main/examples/image-to-text)</li> |
| Segment Anything Model | | <div style="text-align:left"><li>Single card</li></div> | <li>[object segmentation](https://github.com/huggingface/optimum-habana/tree/main/examples/object-segementation)</li> |
| VideoMAE | | <div style="text-align:left"><li>Single card</li></div> | <li>[Video classification](https://github.com/huggingface/optimum-habana/tree/main/examples/video-classification)</li> |
</div>
@@ -229,6 +231,16 @@ The following model architectures, tasks and device distributions have been vali
</div>
- PyTorch Image Models/TIMM:
<div align="center">
| Architecture | Training | Inference | Tasks |
|---------------------|:--------:|:---------:|:------|
| FastViT | | <div style="text-align:left"><li>Single card</li></div> | <li>[image classification](https://github.com/huggingface/optimum-habana/tree/main/examples/image-classification)</li> |
</div>
- TRL:
<div align="center">
@@ -249,4 +261,4 @@ After training your model, feel free to submit it to the Intel [leaderboard](htt
## Development
Check the [contributor guide](https://github.com/huggingface/optimum/blob/main/CONTRIBUTING.md) for instructions.
Check the [contributor guide](https://github.com/huggingface/optimum/blob/main/CONTRIBUTING.md) for instructions.