test.py
import argparse
import json
import os

import torch
from dm_control import suite
from dm_control.suite.wrappers import pixels

from agent import CEMAgent
from model import Encoder, RecurrentStateSpaceModel, RewardModel
from wrappers import GymWrapper, RepeatAction


def main():
parser = argparse.ArgumentParser(description='Test learned model')
parser.add_argument('dir', type=str, help='log directory to load learned model')
parser.add_argument('--render', action='store_true')
parser.add_argument('--domain-name', type=str, default='cheetah')
parser.add_argument('--task-name', type=str, default='run')
parser.add_argument('-R', '--action-repeat', type=int, default=4)
parser.add_argument('--episodes', type=int, default=1)
parser.add_argument('-H', '--horizon', type=int, default=12)
parser.add_argument('-I', '--N-iterations', type=int, default=10)
parser.add_argument('-J', '--N-candidates', type=int, default=1000)
parser.add_argument('-K', '--N-top-candidates', type=int, default=100)
args = parser.parse_args()

    # define environment and apply wrappers
    env = suite.load(args.domain_name, args.task_name)
    # render 64x64 pixel observations (presumably the input size the Encoder expects)
    env = pixels.Wrapper(env, render_kwargs={'height': 64,
                                             'width': 64,
                                             'camera_id': 0})
    env = GymWrapper(env)  # expose a Gym-style reset/step interface
    env = RepeatAction(env, skip=args.action_repeat)  # repeat each action, reducing the control frequency

    # restore training-time hyperparameters and define models
    with open(os.path.join(args.dir, 'args.json'), 'r') as f:
        train_args = json.load(f)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    encoder = Encoder().to(device)
    rssm = RecurrentStateSpaceModel(train_args['state_dim'],
                                    env.action_space.shape[0],
                                    train_args['rnn_hidden_dim']).to(device)
    reward_model = RewardModel(train_args['state_dim'],
                               train_args['rnn_hidden_dim']).to(device)
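    # state_dim and rnn_hidden_dim must match the values used at training
    # time, which is why they are read back from args.json above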

    # load learned parameters (map_location so a GPU-trained model also loads on CPU)
    encoder.load_state_dict(
        torch.load(os.path.join(args.dir, 'encoder.pth'), map_location=device))
    rssm.load_state_dict(
        torch.load(os.path.join(args.dir, 'rssm.pth'), map_location=device))
    reward_model.load_state_dict(
        torch.load(os.path.join(args.dir, 'reward_model.pth'), map_location=device))

    # define agent
    cem_agent = CEMAgent(encoder, rssm, reward_model,
                         args.horizon, args.N_iterations,
                         args.N_candidates, args.N_top_candidates)
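    # A rough sketch of what CEMAgent is assumed to do per call (cross-entropy
    # method planning, PlaNet-style): encode obs into a latent state, sample
    # N_candidates action sequences of length `horizon` from a Gaussian, roll
    # each out through the RSSM prior, score it by the summed predicted reward
    # from reward_model, refit the Gaussian to the N_top_candidates best
    # sequences, repeat for N_iterations, and execute the first action of the
    # final mean sequence.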

    # test learned model in the environment
for episode in range(args.episodes):
cem_agent.reset()
obs = env.reset()
done = False
total_reward = 0
while not done:
action = cem_agent(obs)
obs, reward, done, _ = env.step(action)
total_reward += reward
if args.render:
env.render(height=256, width=256, camera_id=0)
print('Total test reward at episode [%4d/%4d] is %f' %
(episode+1, args.episodes, total_reward))


if __name__ == '__main__':
main()
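
# Example invocation (the log directory name is hypothetical; point it at a
# directory containing args.json, encoder.pth, rssm.pth and reward_model.pth
# saved by the training script):
#   python test.py log/cheetah_run --render --episodes 3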