# @package _global_

# Example configuration for experimenting. Trains the Attention Model (AM) on
# the TSP environment with 50 locations via REINFORCE with a greedy rollout baseline.
# Comments on the most common hyperparameters can be found below.

# Override defaults: take configs from the given relative paths
defaults:
  - override /model: am.yaml
  - override /env: tsp.yaml
  - override /callbacks: default.yaml
  - override /trainer: default.yaml
  # - override /logger: null # uncomment (and comment the line below) to disable logging
  - override /logger: wandb.yaml
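
# A minimal usage sketch (an assumption, not guaranteed by this file: it
# presumes a standard Hydra entry point named `run.py` and that this file is
# registered as the `base` experiment). Any config group above can be swapped
# from the command line instead of editing this file, e.g.:
#   python run.py experiment=base logger=null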

# Environment configuration
# Note that by default we load the `.npz` files for the TSP environment,
# which are automatically generated with a fixed seed following Kool et al. (2019).
env:
  generator_params:
    num_loc: 50
  check_solution: False # skip solution feasibility checks to speed up training
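
# For example, to train on 100-location instances instead, override the value
# from the command line (a sketch, under the same entry-point assumption as above):
#   python run.py experiment=base env.generator_params.num_loc=100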

# Logging: we use Weights & Biases (wandb) in this case
logger:
  wandb:
    project: "rl4co"
    tags: ["am", "tsp"]
    group: "tsp${env.generator_params.num_loc}"
    name: "am-tsp${env.generator_params.num_loc}"
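
# With num_loc: 50 above, the `${...}` interpolations resolve to
# group: "tsp50" and name: "am-tsp50", so runs are grouped by problem size.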

# Model: this contains the environment (which is automatically passed to the
# model on initialization), the policy network, and other hyperparameters.
# This is a `LightningModule` that can be trained with PyTorch Lightning.
model:
  batch_size: 512
  val_batch_size: 1024
  test_batch_size: 1024
  train_data_size: 1_280_000
  val_data_size: 10_000
  test_data_size: 10_000
  optimizer_kwargs:
    lr: 1e-4
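
# Rough arithmetic: with train_data_size: 1_280_000 and batch_size: 512, one
# epoch corresponds to 1_280_000 / 512 = 2_500 gradient steps, i.e. 250_000
# steps over the 100 epochs configured below.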

# Trainer: this is a customized version of the PyTorch Lightning trainer.
trainer:
  max_epochs: 100

seed: 1234
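
# To launch a run with this experiment config (a sketch; assumes the `run.py`
# Hydra entry point mentioned above), optionally overriding any value inline:
#   python run.py experiment=base trainer.max_epochs=10 seed=42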