From 68266132e4112e92dcdaea513a67bcf7448c8c70 Mon Sep 17 00:00:00 2001
From: GitHub Actions
Date: Thu, 9 Jan 2025 14:41:42 +0000
Subject: [PATCH 1/4] Auto-update of LLM Finetuning template

---
 examples/llm_finetuning/steps/promote.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/examples/llm_finetuning/steps/promote.py b/examples/llm_finetuning/steps/promote.py
index 9d5b2e40db..30333e0ad8 100644
--- a/examples/llm_finetuning/steps/promote.py
+++ b/examples/llm_finetuning/steps/promote.py
@@ -54,11 +54,11 @@ def promote(
 
     msg = (
         f"`{metric}` values to compare:\n"
-        f"base={base_metrics[metric]*100:.2f}%\n"
-        f"finetuned={ft_metrics[metric]*100:.2f}%"
+        f"base={base_metrics[metric] * 100:.2f}%\n"
+        f"finetuned={ft_metrics[metric] * 100:.2f}%"
     )
     if staging_metrics:
-        msg += f"\nstaging={staging_metrics[metric]*100:.2f}%"
+        msg += f"\nstaging={staging_metrics[metric] * 100:.2f}%"
     logger.info(msg)
 
     if base_metrics[metric] <= ft_metrics[metric]:

From dcfb03e8584f44637ed6afe04cda40536ecda9b4 Mon Sep 17 00:00:00 2001
From: GitHub Actions
Date: Thu, 9 Jan 2025 14:42:13 +0000
Subject: [PATCH 2/4] Auto-update of Starter template

---
 examples/mlops_starter/steps/model_promoter.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/mlops_starter/steps/model_promoter.py b/examples/mlops_starter/steps/model_promoter.py
index ca73c472f4..0c570488d2 100644
--- a/examples/mlops_starter/steps/model_promoter.py
+++ b/examples/mlops_starter/steps/model_promoter.py
@@ -44,7 +44,7 @@ def model_promoter(accuracy: float, stage: str = "production") -> bool:
 
     if accuracy < 0.8:
         logger.info(
-            f"Model accuracy {accuracy*100:.2f}% is below 80% ! Not promoting model."
+            f"Model accuracy {accuracy * 100:.2f}% is below 80% ! Not promoting model."
         )
     else:
         logger.info(f"Model promoted to {stage}!")

From eb96d953f07ecbf0f4c78c78c569d0d012a7f9d0 Mon Sep 17 00:00:00 2001
From: GitHub Actions
Date: Thu, 9 Jan 2025 14:49:14 +0000
Subject: [PATCH 3/4] Auto-update of E2E template

---
 examples/e2e/steps/deployment/deployment_deploy.py | 10 ++++------
 examples/e2e/steps/training/model_evaluator.py     |  8 ++++----
 2 files changed, 8 insertions(+), 10 deletions(-)

diff --git a/examples/e2e/steps/deployment/deployment_deploy.py b/examples/e2e/steps/deployment/deployment_deploy.py
index 2895d35fcb..07395edd5d 100644
--- a/examples/e2e/steps/deployment/deployment_deploy.py
+++ b/examples/e2e/steps/deployment/deployment_deploy.py
@@ -34,12 +34,10 @@
 
 
 @step
-def deployment_deploy() -> (
-    Annotated[
-        Optional[MLFlowDeploymentService],
-        ArtifactConfig(name="mlflow_deployment", is_deployment_artifact=True),
-    ]
-):
+def deployment_deploy() -> Annotated[
+    Optional[MLFlowDeploymentService],
+    ArtifactConfig(name="mlflow_deployment", is_deployment_artifact=True),
+]:
     """Predictions step.
 
     This is an example of a predictions step that takes the data in and returns
diff --git a/examples/e2e/steps/training/model_evaluator.py b/examples/e2e/steps/training/model_evaluator.py
index 64fac77295..60f4ded534 100644
--- a/examples/e2e/steps/training/model_evaluator.py
+++ b/examples/e2e/steps/training/model_evaluator.py
@@ -82,22 +82,22 @@ def model_evaluator(
         dataset_trn.drop(columns=[target]),
         dataset_trn[target],
     )
-    logger.info(f"Train accuracy={trn_acc*100:.2f}%")
+    logger.info(f"Train accuracy={trn_acc * 100:.2f}%")
     tst_acc = model.score(
         dataset_tst.drop(columns=[target]),
         dataset_tst[target],
     )
-    logger.info(f"Test accuracy={tst_acc*100:.2f}%")
+    logger.info(f"Test accuracy={tst_acc * 100:.2f}%")
     mlflow.log_metric("testing_accuracy_score", tst_acc)
 
     messages = []
     if trn_acc < min_train_accuracy:
         messages.append(
-            f"Train accuracy {trn_acc*100:.2f}% is below {min_train_accuracy*100:.2f}% !"
+            f"Train accuracy {trn_acc * 100:.2f}% is below {min_train_accuracy * 100:.2f}% !"
         )
     if tst_acc < min_test_accuracy:
         messages.append(
-            f"Test accuracy {tst_acc*100:.2f}% is below {min_test_accuracy*100:.2f}% !"
+            f"Test accuracy {tst_acc * 100:.2f}% is below {min_test_accuracy * 100:.2f}% !"
         )
     if fail_on_accuracy_quality_gates and messages:
         raise RuntimeError(

From 9542ed8bb433bce7baae6e482230a61a7c5cee42 Mon Sep 17 00:00:00 2001
From: GitHub Actions
Date: Thu, 9 Jan 2025 14:52:15 +0000
Subject: [PATCH 4/4] Auto-update of NLP template

---
 .../e2e_nlp/steps/deploying/huggingface_deployment.py |  4 +++-
 .../e2e_nlp/steps/promotion/promote_get_metrics.py    | 10 ++++------
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/examples/e2e_nlp/steps/deploying/huggingface_deployment.py b/examples/e2e_nlp/steps/deploying/huggingface_deployment.py
index 222e813a3a..7c743ce732 100644
--- a/examples/e2e_nlp/steps/deploying/huggingface_deployment.py
+++ b/examples/e2e_nlp/steps/deploying/huggingface_deployment.py
@@ -39,7 +39,9 @@ def deploy_to_huggingface(
     """
     ### ADD YOUR OWN CODE HERE - THIS IS JUST AN EXAMPLE ###
     secret = Client().get_secret("huggingface_creds")
-    assert secret, "No secret found with name 'huggingface_creds'. Please create one that includes your `username` and `token`."
+    assert secret, (
+        "No secret found with name 'huggingface_creds'. Please create one that includes your `username` and `token`."
+    )
     token = secret.secret_values["token"]
     api = HfApi(token=token)
     hf_repo = api.create_repo(
diff --git a/examples/e2e_nlp/steps/promotion/promote_get_metrics.py b/examples/e2e_nlp/steps/promotion/promote_get_metrics.py
index 8c8220bc71..eec6eb526f 100644
--- a/examples/e2e_nlp/steps/promotion/promote_get_metrics.py
+++ b/examples/e2e_nlp/steps/promotion/promote_get_metrics.py
@@ -30,12 +30,10 @@
 
 
 @step
-def promote_get_metrics() -> (
-    Tuple[
-        Annotated[Dict[str, Any], "latest_metrics"],
-        Annotated[Dict[str, Any], "current_metrics`"],
-    ]
-):
+def promote_get_metrics() -> Tuple[
+    Annotated[Dict[str, Any], "latest_metrics"],
+    Annotated[Dict[str, Any], "current_metrics`"],
+]:
     """Get metrics for comparison for promoting a model.
 
     This is an example of a metric retrieval step. It is used to retrieve