From ea831440c885d8b341bb732f8c4b78a664064d1f Mon Sep 17 00:00:00 2001
From: Ishaan Sehgal
Date: Thu, 14 Mar 2024 12:14:51 -0700
Subject: [PATCH] feat: Bump model versions (#299)

---
 presets/models/falcon/model.go  |  8 ++++----
 presets/models/mistral/model.go | 12 ++++++------
 presets/models/phi/model.go     |  6 +++---
 3 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/presets/models/falcon/model.go b/presets/models/falcon/model.go
index 00dfd0d77..7501dce23 100644
--- a/presets/models/falcon/model.go
+++ b/presets/models/falcon/model.go
@@ -37,10 +37,10 @@ var (
 	PresetFalcon40BInstructModel = PresetFalcon40BModel + "-instruct"
 
 	PresetFalconTagMap = map[string]string{
-		"Falcon7B":          "0.0.2",
-		"Falcon7BInstruct":  "0.0.2",
-		"Falcon40B":         "0.0.2",
-		"Falcon40BInstruct": "0.0.2",
+		"Falcon7B":          "0.0.3",
+		"Falcon7BInstruct":  "0.0.3",
+		"Falcon40B":         "0.0.3",
+		"Falcon40BInstruct": "0.0.3",
 	}
 
 	baseCommandPresetFalcon = "accelerate launch"
diff --git a/presets/models/mistral/model.go b/presets/models/mistral/model.go
index 3f1d79d15..7089eafb6 100644
--- a/presets/models/mistral/model.go
+++ b/presets/models/mistral/model.go
@@ -23,12 +23,12 @@ func init() {
 }
 
 var (
-	PresetMistral7BModel = "mistral-7b"
-	PresetMistral7BInstructModel = PresetMistral7BModel + "-instruct"
+	PresetMistral7BModel         = "mistral-7b"
+	PresetMistral7BInstructModel = PresetMistral7BModel + "-instruct"
 
 	PresetMistralTagMap = map[string]string{
-		"Mistral7B":         "0.0.2",
-		"Mistral7BInstruct": "0.0.2",
+		"Mistral7B":         "0.0.3",
+		"Mistral7BInstruct": "0.0.3",
 	}
 
 	baseCommandPresetMistral = "accelerate launch"
@@ -46,7 +46,7 @@ func (*mistral7b) GetInferenceParameters() *model.PresetInferenceParam {
 	return &model.PresetInferenceParam{
 		ModelFamilyName:           "Mistral",
 		ImageAccessMode:           string(kaitov1alpha1.ModelImageAccessModePublic),
-		DiskStorageRequirement:    "50Gi",
+		DiskStorageRequirement:    "100Gi",
 		GPUCountRequirement:       "1",
 		TotalGPUMemoryRequirement: "14Gi",
 		PerGPUMemoryRequirement:   "0Gi", // We run Mistral using native vertical model parallel, no per GPU memory requirement.
@@ -70,7 +70,7 @@ func (*mistral7bInst) GetInferenceParameters() *model.PresetInferenceParam {
 	return &model.PresetInferenceParam{
 		ModelFamilyName:           "Mistral",
 		ImageAccessMode:           string(kaitov1alpha1.ModelImageAccessModePublic),
-		DiskStorageRequirement:    "50Gi",
+		DiskStorageRequirement:    "100Gi",
 		GPUCountRequirement:       "1",
 		TotalGPUMemoryRequirement: "16Gi",
 		PerGPUMemoryRequirement:   "0Gi", // We run mistral using native vertical model parallel, no per GPU memory requirement.
diff --git a/presets/models/phi/model.go b/presets/models/phi/model.go
index e819256b6..2e54dce38 100644
--- a/presets/models/phi/model.go
+++ b/presets/models/phi/model.go
@@ -19,10 +19,10 @@ func init() {
 }
 
 var (
-	PresetPhi2Model  = "phi-2"
+	PresetPhi2Model = "phi-2"
 
 	PresetPhiTagMap = map[string]string{
-		"Phi2": "0.0.1",
+		"Phi2": "0.0.2",
 	}
 
 	baseCommandPresetPhi = "accelerate launch"
@@ -40,7 +40,7 @@ func (*phi2) GetInferenceParameters() *model.PresetInferenceParam {
 	return &model.PresetInferenceParam{
 		ModelFamilyName:           "Phi",
 		ImageAccessMode:           string(kaitov1alpha1.ModelImageAccessModePublic),
-		DiskStorageRequirement:    "30Gi",
+		DiskStorageRequirement:    "50Gi",
 		GPUCountRequirement:       "1",
 		TotalGPUMemoryRequirement: "12Gi",
 		PerGPUMemoryRequirement:   "0Gi", // We run Phi using native vertical model parallel, no per GPU memory requirement.