From 0932df4908f674ee7632d24c4db2a136872dea92 Mon Sep 17 00:00:00 2001
From: harborn
Date: Fri, 1 Mar 2024 13:22:44 +0800
Subject: [PATCH] fix gpu workflow (#125)

* fix gpu workflow

* update
---
 .github/workflows/workflow_finetune_gpu.yml | 6 +++---
 inference/models/mpt-7b.yaml                | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/workflow_finetune_gpu.yml b/.github/workflows/workflow_finetune_gpu.yml
index 4dcb43da3..e0f4c7830 100644
--- a/.github/workflows/workflow_finetune_gpu.yml
+++ b/.github/workflows/workflow_finetune_gpu.yml
@@ -38,9 +38,9 @@ jobs:
     - name: Running task on Intel GPU
       run: |
-        rm ~/borealis-runner/llm-on-ray.tar.gz -f
-        tar zcf ~/borealis-runner/llm-on-ray.tar.gz -C ~/actions-runner/_work/llm-on-ray .
-        cd ~/borealis-runner/
+        rm /home/ci/borealis-runner/llm-on-ray.tar.gz -f
+        tar zcf /home/ci/borealis-runner/llm-on-ray.tar.gz -C /home/ci/actions-runner/_work/llm-on-ray .
+        cd /home/ci/borealis-runner/
         python3 finetune_on_pvc.py --need_create_conda_env true --base_models "${{ matrix.model }}"
     - name: Test Summary
       run: echo "to be continued"
diff --git a/inference/models/mpt-7b.yaml b/inference/models/mpt-7b.yaml
index 5b47833ab..fb6f1a125 100644
--- a/inference/models/mpt-7b.yaml
+++ b/inference/models/mpt-7b.yaml
@@ -8,7 +8,7 @@ deepspeed: false
 workers_per_group: 2
 device: CPU
 ipex:
-  enabled: true
+  enabled: false
   precision: bf16
 model_description:
   model_id_or_path: mosaicml/mpt-7b