diff --git a/README.md b/README.md
index f715b23e..002f3f99 100644
--- a/README.md
+++ b/README.md
@@ -127,6 +127,8 @@ model_mlp = odak.learn.models.multi_layer_perceptron(
                                                      bias = False,
                                                      model_type = 'conventional'
                                                     )
+
+optimizer = torch.optim.AdamW(model_mlp.parameters(), lr = 1e-3)
 loss_function = torch.nn.MSELoss()
 for epoch in range(10000):
@@ -138,6 +140,7 @@ for epoch in range(10000):
     optimizer.step()
     print('Training loss: {}'.format(loss.item()))
 
+for item_id, item in enumerate(pos_x1):
     torch.no_grad()
     ground_truth = x1[item_id]