From c64c70564e5c3e5efb443fc63b7647e9db392d23 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Sat, 21 Dec 2024 01:16:01 +0000
Subject: [PATCH] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 examples/llm/g_retriever.py | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/examples/llm/g_retriever.py b/examples/llm/g_retriever.py
index 02faca26b577..e0a57bf3edde 100644
--- a/examples/llm/g_retriever.py
+++ b/examples/llm/g_retriever.py
@@ -185,14 +185,14 @@ def inference_step(model, batch, model_save_name):
 
 
 def train(
-    num_epochs, # Total number of training epochs
-    hidden_channels, # Number of hidden channels in GNN
-    num_gnn_layers, # Number of GNN layers
-    batch_size, # Training batch size
-    eval_batch_size, # Evaluation batch size
-    lr, # Initial learning rate
-    checkpointing=False, # Whether to checkpoint model
-    tiny_llama=False, # Whether to use tiny LLaMA model
+    num_epochs,  # Total number of training epochs
+    hidden_channels,  # Number of hidden channels in GNN
+    num_gnn_layers,  # Number of GNN layers
+    batch_size,  # Training batch size
+    eval_batch_size,  # Evaluation batch size
+    lr,  # Initial learning rate
+    checkpointing=False,  # Whether to checkpoint model
+    tiny_llama=False,  # Whether to use tiny LLaMA model
 ):
     """Train a GNN+LLM model on WebQSP dataset.
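
For context, a minimal sketch of how the train() entry point touched by this patch might be invoked. The import path and all argument values below are illustrative assumptions, not defaults taken from examples/llm/g_retriever.py:

    # Hypothetical call into the example's train() entry point.
    # Values are illustrative assumptions, not the script's real defaults;
    # see the argparse setup in examples/llm/g_retriever.py for those.
    from g_retriever import train  # assumes the example is on the path

    train(
        num_epochs=2,          # Total number of training epochs
        hidden_channels=1024,  # Number of hidden channels in GNN
        num_gnn_layers=4,      # Number of GNN layers
        batch_size=8,          # Training batch size
        eval_batch_size=16,    # Evaluation batch size
        lr=1e-5,               # Initial learning rate
        checkpointing=True,    # Whether to checkpoint model
        tiny_llama=True,       # Whether to use tiny LLaMA model
    )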