From 0861d66fce403954d6f51ab05eba69cea160509b Mon Sep 17 00:00:00 2001 From: ishaansehgal99 Date: Tue, 12 Nov 2024 12:55:29 -0700 Subject: [PATCH 1/7] feat: supported models --- presets/models/supported_models.yaml | 61 ++++++++++++++++++---------- 1 file changed, 39 insertions(+), 22 deletions(-) diff --git a/presets/models/supported_models.yaml b/presets/models/supported_models.yaml index db49641b8..1ca9f020c 100644 --- a/presets/models/supported_models.yaml +++ b/presets/models/supported_models.yaml @@ -32,15 +32,16 @@ models: # Falcon - name: falcon-7b type: text-generation - version: https://huggingface.co/tiiuae/falcon-7b/commit/898df1396f35e447d5fe44e0a3ccaaaa69f30d36 + version: https://huggingface.co/tiiuae/falcon-7b/commit/ec89142b67d748a1865ea4451372db8313ada0d8 runtime: tfs - tag: 0.0.6 + tag: 0.0.7 - name: falcon-7b-instruct type: text-generation - version: https://huggingface.co/tiiuae/falcon-7b-instruct/commit/cf4b3c42ce2fdfe24f753f0f0d179202fea59c99 + version: https://huggingface.co/tiiuae/falcon-7b-instruct/commit/8782b5c5d8c9290412416618f36a133653e85285 runtime: tfs - tag: 0.0.6 + tag: 0.0.7 # Tag history: + # 0.0.7 - Support VLLM runtime # 0.0.6 - Add Logging & Metrics Server # 0.0.5 - Tuning and Adapters # 0.0.4 - Adjust default model params (#310) @@ -49,15 +50,16 @@ models: # 0.0.1 - Initial Release - name: falcon-40b type: text-generation - version: https://huggingface.co/tiiuae/falcon-40b/commit/4a70170c215b36a3cce4b4253f6d0612bb7d4146 + version: https://huggingface.co/tiiuae/falcon-40b/commit/05ab2ee8d6b593bdbab17d728de5c028a7a94d83 runtime: tfs - tag: 0.0.7 + tag: 0.0.8 - name: falcon-40b-instruct type: text-generation version: https://huggingface.co/tiiuae/falcon-40b-instruct/commit/ecb78d97ac356d098e79f0db222c9ce7c5d9ee5f runtime: tfs - tag: 0.0.7 + tag: 0.0.8 # Tag history for 40b models: + # 0.0.8 - Support VLLM runtime # 0.0.7 - Add Logging & Metrics Server # 0.0.6 - Tuning and Adapters # 0.0.5 - Adjust default model params (#310) @@ -69,15 +71,16 @@ models: # Mistral - name: mistral-7b type: text-generation - version: https://huggingface.co/mistralai/Mistral-7B-v0.3/commit/c882233d224d27b727b3d9299b12a9aab9dda6f7 + version: https://huggingface.co/mistralai/Mistral-7B-v0.3/commit/d8cadc02ac76bd617a919d50b092e59d2d110aff runtime: tfs - tag: 0.0.7 + tag: 0.0.8 - name: mistral-7b-instruct type: text-generation - version: https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3/commit/0417f4babd26db0b5ed07c1d0bc85658ab526ea3 + version: https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3/commit/e0bc86c23ce5aae1db576c8cca6f06f1f73af2db runtime: tfs - tag: 0.0.7 + tag: 0.0.8 # Tag history: + # 0.0.8 - Support VLLM runtime # 0.0.7 - Add Logging & Metrics Server # 0.0.6 - Update model version and Address missing weights files fix # 0.0.5 - Tuning and Adapters @@ -89,10 +92,11 @@ models: # Phi-2 - name: phi-2 type: text-generation - version: https://huggingface.co/microsoft/phi-2/commit/b10c3eba545ad279e7208ee3a5d644566f001670 + version: https://huggingface.co/microsoft/phi-2/commit/ef382358ec9e382308935a992d908de099b64c23 runtime: tfs - tag: 0.0.5 + tag: 0.0.6 # Tag history: + # 0.0.6 - Support VLLM runtime # 0.0.5 - Add Logging & Metrics Server # 0.0.4 - Tuning and Adapters # 0.0.3 - Adjust default model params (#310) @@ -102,36 +106,49 @@ models: # Phi-3 - name: phi-3-mini-4k-instruct type: text-generation - version: https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/commit/d269012bea6fbe38ce7752c8940fea010eea3383 + version: 
https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/commit/0a67737cc96d2554230f90338b163bc6380a2a85 runtime: tfs - tag: 0.0.2 + tag: 0.0.3 # Tag history: + # 0.0.3 - Support VLLM runtime # 0.0.2 - Add Logging & Metrics Server # 0.0.1 - Initial Release - name: phi-3-mini-128k-instruct type: text-generation - version: https://huggingface.co/microsoft/Phi-3-mini-128k-instruct/commit/5be6479b4bc06a081e8f4c6ece294241ccd32dec + version: https://huggingface.co/microsoft/Phi-3-mini-128k-instruct/commit/a90b62ae09941edff87a90ced39ba5807e6b2ade runtime: tfs - tag: 0.0.2 + tag: 0.0.3 # Tag history: + # 0.0.3 - Support VLLM runtime # 0.0.2 - Add Logging & Metrics Server # 0.0.1 - Initial Release - name: phi-3-medium-4k-instruct type: text-generation - version: https://huggingface.co/microsoft/Phi-3-medium-4k-instruct/commit/d194e4e74ffad5a5e193e26af25bcfc80c7f1ffc + version: https://huggingface.co/microsoft/Phi-3-medium-4k-instruct/commit/ae004ae82eb6eddc32906dfacb1d6dfea8f91996 runtime: tfs - tag: 0.0.2 + tag: 0.0.3 # Tag history: + # 0.0.3 - Support VLLM runtime # 0.0.2 - Add Logging & Metrics Server # 0.0.1 - Initial Release - name: phi-3-medium-128k-instruct type: text-generation - version: https://huggingface.co/microsoft/Phi-3-medium-128k-instruct/commit/cae1d42b5577398fd1be9f0746052562ae552886 + version: https://huggingface.co/microsoft/Phi-3-medium-128k-instruct/commit/fa7d2aa4f5ea69b2e36b20d050cdae79c9bfbb3f runtime: tfs - tag: 0.0.2 + tag: 0.0.3 # Tag history: + # 0.0.3 - Support VLLM runtime # 0.0.2 - Add Logging & Metrics Server - # 0.0.1 - Initial Release \ No newline at end of file + # 0.0.1 - Initial Release + + - name: phi-3.5-mini-instruct + type: text-generation + version: https://huggingface.co/microsoft/Phi-3.5-mini-instruct/commit/af0dfb8029e8a74545d0736d30cb6b58d2f0f3f0 + runtime: tfs + tag: 0.0.1 + # Tag history: + # 0.0.1 - New Model! 
Support VLLM Runtime + \ No newline at end of file From 06d6a350a9a2983210fbd47981a8eea0f27191d1 Mon Sep 17 00:00:00 2001 From: zhuangqh Date: Wed, 13 Nov 2024 10:27:53 +1100 Subject: [PATCH 2/7] fix repo name --- .github/workflows/kind-cluster/determine_models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/kind-cluster/determine_models.py b/.github/workflows/kind-cluster/determine_models.py index 6433b4e60..bd55e9d45 100644 --- a/.github/workflows/kind-cluster/determine_models.py +++ b/.github/workflows/kind-cluster/determine_models.py @@ -21,7 +21,7 @@ def read_yaml(file_path): YAML_PR = read_yaml(supp_models_yaml) # Format: {falcon-7b : {model_name:falcon-7b, type:text-generation, version: #, tag: #}} MODELS = {model['name']: model for model in YAML_PR['models']} -KAITO_REPO_URL = "https://github.com/kaito-repo/kaito.git" +KAITO_REPO_URL = "https://github.com/kaito-project/kaito.git" def set_multiline_output(name, value): with open(os.environ['GITHUB_OUTPUT'], 'a') as fh: From c29983a6e14511050de7751672e356580645a03c Mon Sep 17 00:00:00 2001 From: zhuangqh Date: Wed, 13 Nov 2024 10:42:40 +1100 Subject: [PATCH 3/7] fix runner tag --- .github/workflows/preset-image-build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/preset-image-build.yml b/.github/workflows/preset-image-build.yml index 66a2f57a4..29780b7fd 100644 --- a/.github/workflows/preset-image-build.yml +++ b/.github/workflows/preset-image-build.yml @@ -75,7 +75,7 @@ jobs: build-models: needs: determine-models if: needs.determine-models.outputs.is_matrix_empty == 'false' - runs-on: [self-hosted, 'hostname:model-server'] + runs-on: [self-hosted, 'hostname:kaito-e2e-github-runner'] environment: preset-env strategy: fail-fast: false From 40417d2bb280596d369fc8d79827fb916cbd6aed Mon Sep 17 00:00:00 2001 From: Ishaan Sehgal Date: Tue, 12 Nov 2024 17:06:14 -0700 Subject: [PATCH 4/7] Update preset-image-build.yml Signed-off-by: Ishaan Sehgal --- .github/workflows/preset-image-build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/preset-image-build.yml b/.github/workflows/preset-image-build.yml index 29780b7fd..66a2f57a4 100644 --- a/.github/workflows/preset-image-build.yml +++ b/.github/workflows/preset-image-build.yml @@ -75,7 +75,7 @@ jobs: build-models: needs: determine-models if: needs.determine-models.outputs.is_matrix_empty == 'false' - runs-on: [self-hosted, 'hostname:kaito-e2e-github-runner'] + runs-on: [self-hosted, 'hostname:model-server'] environment: preset-env strategy: fail-fast: false From b5492296ea2d7f964c602ea6c518c6fe779ff998 Mon Sep 17 00:00:00 2001 From: jerryzhuang Date: Wed, 13 Nov 2024 22:48:33 +1100 Subject: [PATCH 5/7] add some preset test for vllm Signed-off-by: jerryzhuang --- .github/e2e-preset-configs.json | 4 +- .github/workflows/e2e-preset-test.yml | 32 +++++++++-- .../{falcon-7b.yaml => falcon-7b_hf.yaml} | 0 .../manifests/falcon-7b/falcon-7b_vllm.yaml | 55 +++++++++++++++++++ ...aml => phi-3-medium-128k-instruct_hf.yaml} | 0 .../phi-3-medium-128k-instruct_vllm.yaml | 55 +++++++++++++++++++ ....yaml => phi-3-medium-4k-instruct_hf.yaml} | 0 .../phi-3-medium-4k-instruct_vllm.yaml | 55 +++++++++++++++++++ ....yaml => phi-3-mini-128k-instruct_hf.yaml} | 0 .../phi-3-mini-128k-instruct_vllm.yaml | 55 +++++++++++++++++++ ...ct.yaml => phi-3-mini-4k-instruct_hf.yaml} | 0 .../phi-3-mini-4k-instruct_vllm.yaml | 55 +++++++++++++++++++ 12 files changed, 303 insertions(+), 8 deletions(-) rename 
presets/test/manifests/falcon-7b/{falcon-7b.yaml => falcon-7b_hf.yaml} (100%) create mode 100644 presets/test/manifests/falcon-7b/falcon-7b_vllm.yaml rename presets/test/manifests/phi-3-medium-128k-instruct/{phi-3-medium-128k-instruct.yaml => phi-3-medium-128k-instruct_hf.yaml} (100%) create mode 100644 presets/test/manifests/phi-3-medium-128k-instruct/phi-3-medium-128k-instruct_vllm.yaml rename presets/test/manifests/phi-3-medium-4k-instruct/{phi-3-medium-4k-instruct.yaml => phi-3-medium-4k-instruct_hf.yaml} (100%) create mode 100644 presets/test/manifests/phi-3-medium-4k-instruct/phi-3-medium-4k-instruct_vllm.yaml rename presets/test/manifests/phi-3-mini-128k-instruct/{phi-3-mini-128k-instruct.yaml => phi-3-mini-128k-instruct_hf.yaml} (100%) create mode 100644 presets/test/manifests/phi-3-mini-128k-instruct/phi-3-mini-128k-instruct_vllm.yaml rename presets/test/manifests/phi-3-mini-4k-instruct/{phi-3-mini-4k-instruct.yaml => phi-3-mini-4k-instruct_hf.yaml} (100%) create mode 100644 presets/test/manifests/phi-3-mini-4k-instruct/phi-3-mini-4k-instruct_vllm.yaml diff --git a/.github/e2e-preset-configs.json b/.github/e2e-preset-configs.json index 75c9e6710..e5fb3e69c 100644 --- a/.github/e2e-preset-configs.json +++ b/.github/e2e-preset-configs.json @@ -70,7 +70,7 @@ "name": "phi-3-mini-4k-instruct", "node-count": 1, "node-vm-size": "Standard_NC6s_v3", - "node-osdisk-size": 50, + "node-osdisk-size": 100, "OSS": true, "loads_adapter": false }, @@ -78,7 +78,7 @@ "name": "phi-3-mini-128k-instruct", "node-count": 1, "node-vm-size": "Standard_NC6s_v3", - "node-osdisk-size": 50, + "node-osdisk-size": 100, "OSS": true, "loads_adapter": false }, diff --git a/.github/workflows/e2e-preset-test.yml b/.github/workflows/e2e-preset-test.yml index d3b49b0e3..df3bc67df 100644 --- a/.github/workflows/e2e-preset-test.yml +++ b/.github/workflows/e2e-preset-test.yml @@ -15,12 +15,17 @@ on: type: boolean default: false description: "Test all Phi models for E2E" + test-on-vllm: + type: boolean + default: false + description: "Test on VLLM runtime" env: GO_VERSION: "1.22" BRANCH_NAME: ${{ github.head_ref || github.ref_name}} FORCE_RUN_ALL: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.force-run-all == 'true' }} FORCE_RUN_ALL_PHI: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.force-run-all-phi-models== 'true' }} + RUNTIME: ${{ (github.event_name == 'workflow_dispatch' && github.event.inputs.test-on-vllm == 'true') && 'vllm' || 'hf' }} permissions: id-token: write @@ -232,7 +237,7 @@ jobs: sed -i "s/MASTER_ADDR_HERE/${{ steps.get_ip.outputs.SERVICE_IP }}/g" presets/test/manifests/${{ matrix.model.name }}/${{ matrix.model.name }}.yaml sed -i "s/TAG_HERE/${{ matrix.model.tag }}/g" presets/test/manifests/${{ matrix.model.name }}/${{ matrix.model.name }}.yaml sed -i "s/REPO_HERE/${{ secrets.ACR_AMRT_USERNAME }}/g" presets/test/manifests/${{ matrix.model.name }}/${{ matrix.model.name }}.yaml - kubectl apply -f presets/test/manifests/${{ matrix.model.name }}/${{ matrix.model.name }}.yaml + kubectl apply -f presets/test/manifests/${{ matrix.model.name }}/${{ matrix.model.name }}_${{ env.RUNTIME }}.yaml - name: Wait for Resource to be ready run: | @@ -243,14 +248,10 @@ jobs: run: | POD_NAME=$(kubectl get pods -l app=${{ matrix.model.name }} -o jsonpath="{.items[0].metadata.name}") kubectl logs $POD_NAME | grep "Adapter added:" | grep "${{ matrix.model.expected_adapter }}" || (echo "Adapter not loaded or incorrect adapter loaded" && exit 1) - - - name: Test home endpoint - run: | - 
curl http://${{ steps.get_ip.outputs.SERVICE_IP }}:80/
 
       - name: Test healthz endpoint
         run: |
-          curl http://${{ steps.get_ip.outputs.SERVICE_IP }}:80/healthz
+          curl http://${{ steps.get_ip.outputs.SERVICE_IP }}:80/health
 
       - name: Test inference endpoint
         run: |
@@ -291,6 +292,25 @@
             }
           }' \
           http://${{ steps.get_ip.outputs.SERVICE_IP }}:80/generate
+        elif [[ "${{ env.RUNTIME }}" == *"vllm"* ]]; then
+          echo "Testing inference for ${{ matrix.model.name }}"
+          curl -X POST \
+            -H "accept: application/json" \
+            -H "Content-Type: application/json" \
+            -d '{
+              "model": "test",
+              "messages": [
+                {
+                  "role": "system",
+                  "content": "You are a helpful assistant."
+                },
+                {
+                  "role": "user",
+                  "content": "Hello!"
+                }
+              ]
+            }' \
+            http://${{ steps.get_ip.outputs.SERVICE_IP }}:80/v1/chat/completions
         else
           echo "Testing inference for ${{ matrix.model.name }}"
           curl -X POST \
diff --git a/presets/test/manifests/falcon-7b/falcon-7b.yaml b/presets/test/manifests/falcon-7b/falcon-7b_hf.yaml
similarity index 100%
rename from presets/test/manifests/falcon-7b/falcon-7b.yaml
rename to presets/test/manifests/falcon-7b/falcon-7b_hf.yaml
diff --git a/presets/test/manifests/falcon-7b/falcon-7b_vllm.yaml b/presets/test/manifests/falcon-7b/falcon-7b_vllm.yaml
new file mode 100644
index 000000000..72087966b
--- /dev/null
+++ b/presets/test/manifests/falcon-7b/falcon-7b_vllm.yaml
@@ -0,0 +1,55 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: falcon-7b
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: falcon
+  template:
+    metadata:
+      labels:
+        app: falcon
+    spec:
+      containers:
+      - name: falcon-container
+        image: REPO_HERE.azurecr.io/falcon-7b:TAG_HERE
+        command:
+        - /bin/sh
+        - -c
+        - python3 /workspace/vllm/inference_api.py --served-model-name test --dtype bfloat16 --chat-template /workspace/chat_templates/falcon-instruct.jinja
+        resources:
+          requests:
+            nvidia.com/gpu: 2
+          limits:
+            nvidia.com/gpu: 2 # Requesting 2 GPUs
+        livenessProbe:
+          httpGet:
+            path: /health
+            port: 5000
+          initialDelaySeconds: 600 # 10 Min
+          periodSeconds: 10
+        readinessProbe:
+          httpGet:
+            path: /health
+            port: 5000
+          initialDelaySeconds: 30
+          periodSeconds: 10
+        volumeMounts:
+        - name: dshm
+          mountPath: /dev/shm
+      volumes:
+      - name: dshm
+        emptyDir:
+          medium: Memory
+      tolerations:
+      - effect: NoSchedule
+        key: sku
+        operator: Equal
+        value: gpu
+      - effect: NoSchedule
+        key: nvidia.com/gpu
+        operator: Exists
+      nodeSelector:
+        pool: falcon7b
diff --git a/presets/test/manifests/phi-3-medium-128k-instruct/phi-3-medium-128k-instruct.yaml b/presets/test/manifests/phi-3-medium-128k-instruct/phi-3-medium-128k-instruct_hf.yaml
similarity index 100%
rename from presets/test/manifests/phi-3-medium-128k-instruct/phi-3-medium-128k-instruct.yaml
rename to presets/test/manifests/phi-3-medium-128k-instruct/phi-3-medium-128k-instruct_hf.yaml
diff --git a/presets/test/manifests/phi-3-medium-128k-instruct/phi-3-medium-128k-instruct_vllm.yaml b/presets/test/manifests/phi-3-medium-128k-instruct/phi-3-medium-128k-instruct_vllm.yaml
new file mode 100644
index 000000000..effca3a1a
--- /dev/null
+++ b/presets/test/manifests/phi-3-medium-128k-instruct/phi-3-medium-128k-instruct_vllm.yaml
@@ -0,0 +1,55 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: phi-3-medium-128k-instruct
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: phi-3-medium-128k-instruct
+  template:
+    metadata:
+      labels:
+        app: phi-3-medium-128k-instruct
+    spec:
+      containers:
+      - name: phi-3-medium-128k-instruct-container
+        image: 
REPO_HERE.azurecr.io/phi-3-medium-128k-instruct:TAG_HERE + command: + - /bin/sh + - -c + - python3 /workspace/vllm/inference_api.py --served-model-name test --dtype float16 + resources: + requests: + nvidia.com/gpu: 1 + limits: + nvidia.com/gpu: 1 # Requesting 1 GPU + livenessProbe: + httpGet: + path: /health + port: 5000 + initialDelaySeconds: 600 # 10 Min + periodSeconds: 10 + readinessProbe: + httpGet: + path: /health + port: 5000 + initialDelaySeconds: 30 + periodSeconds: 10 + volumeMounts: + - name: dshm + mountPath: /dev/shm + volumes: + - name: dshm + emptyDir: + medium: Memory + tolerations: + - effect: NoSchedule + key: sku + operator: Equal + value: gpu + - effect: NoSchedule + key: nvidia.com/gpu + operator: Exists + nodeSelector: + pool: phi3medium12 \ No newline at end of file diff --git a/presets/test/manifests/phi-3-medium-4k-instruct/phi-3-medium-4k-instruct.yaml b/presets/test/manifests/phi-3-medium-4k-instruct/phi-3-medium-4k-instruct_hf.yaml similarity index 100% rename from presets/test/manifests/phi-3-medium-4k-instruct/phi-3-medium-4k-instruct.yaml rename to presets/test/manifests/phi-3-medium-4k-instruct/phi-3-medium-4k-instruct_hf.yaml diff --git a/presets/test/manifests/phi-3-medium-4k-instruct/phi-3-medium-4k-instruct_vllm.yaml b/presets/test/manifests/phi-3-medium-4k-instruct/phi-3-medium-4k-instruct_vllm.yaml new file mode 100644 index 000000000..39a3bb845 --- /dev/null +++ b/presets/test/manifests/phi-3-medium-4k-instruct/phi-3-medium-4k-instruct_vllm.yaml @@ -0,0 +1,55 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: phi-3-medium-4k-instruct +spec: + replicas: 1 + selector: + matchLabels: + app: phi-3-medium-4k-instruct + template: + metadata: + labels: + app: phi-3-medium-4k-instruct + spec: + containers: + - name: phi-3-medium-4k-instruct-container + image: REPO_HERE.azurecr.io/phi-3-medium-4k-instruct:TAG_HERE + command: + - /bin/sh + - -c + - python3 /workspace/vllm/inference_api.py --served-model-name test --dtype float16 + resources: + requests: + nvidia.com/gpu: 1 + limits: + nvidia.com/gpu: 1 # Requesting 1 GPU + livenessProbe: + httpGet: + path: /health + port: 5000 + initialDelaySeconds: 600 # 10 Min + periodSeconds: 10 + readinessProbe: + httpGet: + path: /health + port: 5000 + initialDelaySeconds: 30 + periodSeconds: 10 + volumeMounts: + - name: dshm + mountPath: /dev/shm + volumes: + - name: dshm + emptyDir: + medium: Memory + tolerations: + - effect: NoSchedule + key: sku + operator: Equal + value: gpu + - effect: NoSchedule + key: nvidia.com/gpu + operator: Exists + nodeSelector: + pool: phi3medium4k \ No newline at end of file diff --git a/presets/test/manifests/phi-3-mini-128k-instruct/phi-3-mini-128k-instruct.yaml b/presets/test/manifests/phi-3-mini-128k-instruct/phi-3-mini-128k-instruct_hf.yaml similarity index 100% rename from presets/test/manifests/phi-3-mini-128k-instruct/phi-3-mini-128k-instruct.yaml rename to presets/test/manifests/phi-3-mini-128k-instruct/phi-3-mini-128k-instruct_hf.yaml diff --git a/presets/test/manifests/phi-3-mini-128k-instruct/phi-3-mini-128k-instruct_vllm.yaml b/presets/test/manifests/phi-3-mini-128k-instruct/phi-3-mini-128k-instruct_vllm.yaml new file mode 100644 index 000000000..f719bf96b --- /dev/null +++ b/presets/test/manifests/phi-3-mini-128k-instruct/phi-3-mini-128k-instruct_vllm.yaml @@ -0,0 +1,55 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: phi-3-mini-128k-instruct +spec: + replicas: 1 + selector: + matchLabels: + app: phi-3-mini-128k-instruct + template: + metadata: + labels: 
+        app: phi-3-mini-128k-instruct
+    spec:
+      containers:
+      - name: phi-3-mini-128k-instruct-container
+        image: REPO_HERE.azurecr.io/phi-3-mini-128k-instruct:TAG_HERE
+        command:
+        - /bin/sh
+        - -c
+        - python3 /workspace/vllm/inference_api.py --served-model-name test --dtype float16
+        resources:
+          requests:
+            nvidia.com/gpu: 1
+          limits:
+            nvidia.com/gpu: 1 # Requesting 1 GPU
+        livenessProbe:
+          httpGet:
+            path: /health
+            port: 5000
+          initialDelaySeconds: 600 # 10 Min
+          periodSeconds: 10
+        readinessProbe:
+          httpGet:
+            path: /health
+            port: 5000
+          initialDelaySeconds: 30
+          periodSeconds: 10
+        volumeMounts:
+        - name: dshm
+          mountPath: /dev/shm
+      volumes:
+      - name: dshm
+        emptyDir:
+          medium: Memory
+      tolerations:
+      - effect: NoSchedule
+        key: sku
+        operator: Equal
+        value: gpu
+      - effect: NoSchedule
+        key: nvidia.com/gpu
+        operator: Exists
+      nodeSelector:
+        pool: phi3mini128k
\ No newline at end of file
diff --git a/presets/test/manifests/phi-3-mini-4k-instruct/phi-3-mini-4k-instruct.yaml b/presets/test/manifests/phi-3-mini-4k-instruct/phi-3-mini-4k-instruct_hf.yaml
similarity index 100%
rename from presets/test/manifests/phi-3-mini-4k-instruct/phi-3-mini-4k-instruct.yaml
rename to presets/test/manifests/phi-3-mini-4k-instruct/phi-3-mini-4k-instruct_hf.yaml
diff --git a/presets/test/manifests/phi-3-mini-4k-instruct/phi-3-mini-4k-instruct_vllm.yaml b/presets/test/manifests/phi-3-mini-4k-instruct/phi-3-mini-4k-instruct_vllm.yaml
new file mode 100644
index 000000000..8d1275678
--- /dev/null
+++ b/presets/test/manifests/phi-3-mini-4k-instruct/phi-3-mini-4k-instruct_vllm.yaml
@@ -0,0 +1,55 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: phi-3-mini-4k-instruct
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: phi-3-mini-4k-instruct
+  template:
+    metadata:
+      labels:
+        app: phi-3-mini-4k-instruct
+    spec:
+      containers:
+      - name: phi-3-mini-4k-instruct-container
+        image: REPO_HERE.azurecr.io/phi-3-mini-4k-instruct:TAG_HERE
+        command:
+        - /bin/sh
+        - -c
+        - python3 /workspace/vllm/inference_api.py --served-model-name test --dtype float16
+        resources:
+          requests:
+            nvidia.com/gpu: 1
+          limits:
+            nvidia.com/gpu: 1 # Requesting 1 GPU
+        livenessProbe:
+          httpGet:
+            path: /health
+            port: 5000
+          initialDelaySeconds: 600 # 10 Min
+          periodSeconds: 10
+        readinessProbe:
+          httpGet:
+            path: /health
+            port: 5000
+          initialDelaySeconds: 30
+          periodSeconds: 10
+        volumeMounts:
+        - name: dshm
+          mountPath: /dev/shm
+      volumes:
+      - name: dshm
+        emptyDir:
+          medium: Memory
+      tolerations:
+      - effect: NoSchedule
+        key: sku
+        operator: Equal
+        value: gpu
+      - effect: NoSchedule
+        key: nvidia.com/gpu
+        operator: Exists
+      nodeSelector:
+        pool: phi3mini4kin
\ No newline at end of file

From 2519b06bc29f9db6f403aa9ea4f07f4538ed21d5 Mon Sep 17 00:00:00 2001
From: jerryzhuang
Date: Wed, 13 Nov 2024 22:59:43 +1100
Subject: [PATCH 7/7] fix

Signed-off-by: jerryzhuang
---
 .github/workflows/e2e-preset-test.yml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/e2e-preset-test.yml b/.github/workflows/e2e-preset-test.yml
index df3bc67df..0e6ce085a 100644
--- a/.github/workflows/e2e-preset-test.yml
+++ b/.github/workflows/e2e-preset-test.yml
@@ -234,9 +234,9 @@
       - name: Replace IP and Deploy Resource to K8s
         run: |
-          sed -i "s/MASTER_ADDR_HERE/${{ steps.get_ip.outputs.SERVICE_IP }}/g" presets/test/manifests/${{ matrix.model.name }}/${{ matrix.model.name }}.yaml
-          sed -i "s/TAG_HERE/${{ matrix.model.tag }}/g" presets/test/manifests/${{ matrix.model.name }}/${{ matrix.model.name }}.yaml
-          sed -i "s/REPO_HERE/${{ secrets.ACR_AMRT_USERNAME }}/g" presets/test/manifests/${{ matrix.model.name }}/${{ matrix.model.name }}.yaml
+          sed -i "s/MASTER_ADDR_HERE/${{ steps.get_ip.outputs.SERVICE_IP }}/g" presets/test/manifests/${{ matrix.model.name }}/${{ matrix.model.name }}_${{ env.RUNTIME }}.yaml
+          sed -i "s/TAG_HERE/${{ matrix.model.tag }}/g" presets/test/manifests/${{ matrix.model.name }}/${{ matrix.model.name }}_${{ env.RUNTIME }}.yaml
+          sed -i "s/REPO_HERE/${{ secrets.ACR_AMRT_USERNAME }}/g" presets/test/manifests/${{ matrix.model.name }}/${{ matrix.model.name }}_${{ env.RUNTIME }}.yaml
           kubectl apply -f presets/test/manifests/${{ matrix.model.name }}/${{ matrix.model.name }}_${{ env.RUNTIME }}.yaml
 
       - name: Wait for Resource to be ready