Commit

Fix eval and loss weights (#138)
* fix eval

* reward offset

* sample weights and loss weights

* style
qgallouedec authored Nov 21, 2023
1 parent fd6913f commit 5b36e2f
Showing 5 changed files with 47 additions and 42 deletions.
4 changes: 1 addition & 3 deletions data/to_hub.py
@@ -122,9 +122,7 @@ def add_dataset_to_hub(

    path_in_repo = f"data/{domain}/{task}/"
    commit_message = f"adds {domain} {task} {n_episodes=}"
-    HfApi().create_repo(
-        repo_id="gia-project/gia-dataset", private=False, exist_ok=True, repo_type="dataset"
-    )
+    HfApi().create_repo(repo_id="gia-project/gia-dataset", private=False, exist_ok=True, repo_type="dataset")

    upload_folder(
        repo_id="gia-project/gia-dataset",
20 changes: 10 additions & 10 deletions gia/eval/rl/core.py
@@ -190,12 +190,12 @@ class AtariDictObservationWrapper(ObservationWrapper):
    def __init__(self, env):
        super().__init__(env)
        self.observation_space = spaces.Dict(
-            {"image_observations": spaces.Box(low=0, high=255, shape=(84, 84, 4), dtype=np.uint8)}
+            {"image_observation": spaces.Box(low=0, high=255, shape=(84, 84, 4), dtype=np.uint8)}
        )

    def observation(self, observation):
-        observations = np.transpose(observation, (1, 2, 0))  # make channel last
-        return {"image_observations": observations}
+        observation = np.transpose(observation, (1, 2, 0))  # make channel last
+        return {"image_observation": observation}


def make_atari(task_name: str, episodic_life: bool = True, clip_reward: bool = True, **kwargs) -> Env:
@@ -234,16 +234,16 @@ def __init__(self, env: Env) -> None:
        n_direction = self.observation_space["direction"].n
        self.observation_space = spaces.Dict(
            {
-                "text_observations": env.observation_space.spaces["mission"],
-                "discrete_observations": spaces.MultiDiscrete([n_direction, *n_image]),
+                "text_observation": env.observation_space.spaces["mission"],
+                "discrete_observation": spaces.MultiDiscrete([n_direction, *n_image]),
            }
        )

    def observation(self, observation: Dict[str, np.ndarray]):
-        discrete_observations = np.append(observation["direction"], observation["image"].flatten())
+        discrete_observation = np.append(observation["direction"], observation["image"].flatten())
        return {
-            "text_observations": observation["mission"],
-            "discrete_observations": discrete_observations,
+            "text_observation": observation["mission"],
+            "discrete_observation": discrete_observation,
        }


@@ -263,10 +263,10 @@ def make_babyai(task_name: str, **kwargs) -> Env:
class ContinuousObservationDictWrapper(ObservationWrapper):
    def __init__(self, env):
        super().__init__(env)
-        self.observation_space = spaces.Dict({"continuous_observations": env.observation_space})
+        self.observation_space = spaces.Dict({"continuous_observation": env.observation_space})

    def observation(self, observation):
-        return {"continuous_observations": observation}
+        return {"continuous_observation": observation}


def make_metaworld(task_name: str, **kwargs) -> Env:
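
The renames above matter because the evaluation loop (see scripts/eval_gia.py below) unpacks the wrapper's observation dict directly into model.get_next_action(processor, **observation, ...), so each dict key must match a keyword argument of the new singular signature. A minimal sketch reusing the ContinuousObservationDictWrapper from the diff; the Pendulum-v1 environment is only for illustration:

import gymnasium as gym
from gymnasium import ObservationWrapper, spaces


class ContinuousObservationDictWrapper(ObservationWrapper):
    # Same wrapper as in the diff: exposes the Box observation under the singular
    # key expected by get_next_action's `continuous_observation` keyword argument.
    def __init__(self, env):
        super().__init__(env)
        self.observation_space = spaces.Dict({"continuous_observation": env.observation_space})

    def observation(self, observation):
        return {"continuous_observation": observation}


env = ContinuousObservationDictWrapper(gym.make("Pendulum-v1"))
observation, info = env.reset()
assert set(observation.keys()) == {"continuous_observation"}  # key matches the kwarg name
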
43 changes: 19 additions & 24 deletions gia/modeling_gia.py
@@ -434,8 +434,6 @@ def embed_rl(
        # Prepare RL inputs (pad and cat rewards to observations)
        assert rewards is not None
        if continuous_observations is not None:
-            # Modify the rewards to move from [r_1, r_2, ..., r_T] to [0, r_1, r_2, ..., r_T-1]
-            rewards = torch.cat((torch.zeros_like(rewards[:, :1]), rewards[:, :-1]), dim=1)
            continuous_observations = torch.cat((continuous_observations, rewards.unsqueeze(-1)), dim=-1)
            continuous_observations = cyclic_expand_dim(continuous_observations, self.config.max_continuous_size)
        if continuous_actions is not None:
@@ -448,8 +446,6 @@ def embed_rl(
        elif discrete_observations is not None:
            batch_size, seq_len = discrete_observations.shape[:2]
            inputs_embeds_observations = self.multi_discrete_encoder(discrete_observations)
-            # Modify the rewards to move from [r_1, r_2, ..., r_T] to [0, r_1, r_2, ..., r_T-1]
-            rewards = torch.cat((torch.zeros_like(rewards[:, :1]), rewards[:, :-1]), dim=1)
            inputs_embeds_observations = torch.cat((inputs_embeds_observations, rewards.unsqueeze(-1)), dim=-1)
        elif image_observations is not None:
            batch_size, seq_len = image_observations.shape[:2]
@@ -532,9 +528,7 @@ def output_rl(
        assert rewards is not None
        observations_mask = attention_mask[:, 1::2] if attention_mask is not None else None
        if continuous_observations is not None:
-            # Modify the rewards to move from [r_1, r_2, ..., r_T] to [0, r_1, r_2, ..., r_T-1]
            obs_size = continuous_observations.shape[-1]
-            rewards = torch.cat((torch.zeros_like(rewards[:, :1]), rewards[:, :-1]), dim=1)
            continuous_observations = torch.cat((continuous_observations, rewards.unsqueeze(-1)), dim=-1)
            continuous_observations = cyclic_expand_dim(continuous_observations, self.config.max_continuous_size)
            pred_observations = self.continuous_decoder(hidden_states[:, 1::2])
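
The deleted lines above shifted the reward sequence inside the model, prepending a zero and dropping the last reward so each observation is paired with the reward received on reaching it. With this commit the shift is applied once in the data pipeline (see the add_initial_reward map in scripts/train_gia.py below), and at inference the caller passes the previous step's reward, so keeping the shift here would have offset the rewards twice. A small sketch of the equivalence, with illustrative values:

import torch

rewards = torch.tensor([[1.0, 2.0, 3.0]])  # [r_1, r_2, r_3] for one episode
# The shift that embed_rl/output_rl used to apply:
shifted = torch.cat((torch.zeros_like(rewards[:, :1]), rewards[:, :-1]), dim=1)
assert shifted.tolist() == [[0.0, 1.0, 2.0]]  # [0, r_1, r_2]

# The same shift expressed on a raw reward list, as now done at the dataset level:
reward_list = [1.0, 2.0, 3.0]
assert [0.0] + reward_list[:-1] == [0.0, 1.0, 2.0]
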
@@ -690,12 +684,12 @@ def reset_rl(self):
    def get_next_action(
        self,
        processor: GiaProcessor,
-        continuous_observations: Optional[List[float]] = None,
-        discrete_observations: Optional[List[int]] = None,
-        text_observations: Optional[str] = None,
-        image_observations: Optional[np.ndarray] = None,
+        continuous_observation: Optional[List[float]] = None,
+        discrete_observation: Optional[List[int]] = None,
+        text_observation: Optional[str] = None,
+        image_observation: Optional[np.ndarray] = None,
        action_space: Union[spaces.Box, spaces.Discrete] = None,
-        rewards: Optional[float] = None,
+        reward: Optional[float] = None,
        deterministic: bool = False,
    ):
        # Get the maximum sequence length
@@ -705,26 +699,27 @@ def get_next_action(
        def to_list(x):
            return x.tolist() if isinstance(x, np.ndarray) else x

-        continuous_observations = to_list(continuous_observations)
-        discrete_observations = to_list(discrete_observations)
+        continuous_observation = to_list(continuous_observation)
+        discrete_observation = to_list(discrete_observation)

        # Add a fake action to the end of the sequence
        if isinstance(action_space, spaces.Box):
-            fake_continuous_actions = [0.0 for _ in range(action_space.shape[0])]
-            fake_discrete_actions = None
+            fake_continuous_action = [0.0 for _ in range(action_space.shape[0])]
+            fake_discrete_action = None
        elif isinstance(action_space, spaces.Discrete):
-            fake_continuous_actions = None
-            fake_discrete_actions = 0
+            fake_continuous_action = None
+            fake_discrete_action = 0

-        continuous_observations = [continuous_observations] if continuous_observations is not None else None
-        discrete_observations = [discrete_observations] if discrete_observations is not None else None
-        text_observations = [text_observations] if text_observations is not None else None
-        image_observations = [image_observations] if image_observations is not None else None
-        continuous_actions = [fake_continuous_actions] if fake_continuous_actions is not None else None
-        discrete_actions = [fake_discrete_actions] if fake_discrete_actions is not None else None
+        continuous_observations = [continuous_observation] if continuous_observation is not None else None
+        discrete_observations = [discrete_observation] if discrete_observation is not None else None
+        text_observations = [text_observation] if text_observation is not None else None
+        image_observations = [image_observation] if image_observation is not None else None
+        continuous_actions = [fake_continuous_action] if fake_continuous_action is not None else None
+        discrete_actions = [fake_discrete_action] if fake_discrete_action is not None else None
+        rewards = [reward] if reward is not None else [0.0]

        if self._last_key_values is not None:
-            assert rewards is not None  # rewards must be provided, except for the first step
+            assert reward is not None  # rewards must be provided, except for the first step
            # We concatenate the last observation with the current one
            continuous_observations = (
                [self.last_continuous_observation] + continuous_observations
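
The block above wraps each singular input in a one-element list, since the processor expects batched inputs, and appends a placeholder ("fake") action of the right type for the action space, presumably so the final observation still has an action slot for the model to fill. A minimal sketch of the placeholder logic; the concrete Box and Discrete instances are illustrative:

import numpy as np
from gymnasium import spaces

box_space = spaces.Box(low=-1.0, high=1.0, shape=(3,))
discrete_space = spaces.Discrete(4)

# Same construction as in get_next_action above:
fake_continuous_action = [0.0 for _ in range(box_space.shape[0])]  # [0.0, 0.0, 0.0]
fake_discrete_action = 0                                           # always a valid Discrete action

assert box_space.contains(np.array(fake_continuous_action, dtype=box_space.dtype))
assert discrete_space.contains(fake_discrete_action)
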
2 changes: 1 addition & 1 deletion scripts/eval_gia.py
@@ -84,7 +84,7 @@ def eval_rl(model, processor, task, eval_args):
        done = False
        model.reset_rl()  # remove KV Cache
        while not done:
-            action = model.get_next_action(processor, **observation, rewards=reward, action_space=env.action_space)
+            action = model.get_next_action(processor, **observation, reward=reward, action_space=env.action_space)
            observation, reward, termined, truncated, info = env.step(action)
            done = termined or truncated
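
For context, the surrounding evaluation loop now looks roughly like this; the environment construction and the reward = None initialization before the first step are assumptions, not part of the diff (on the first call, get_next_action falls back to a reward of 0.0):

observation, info = env.reset()
reward = None  # no reward available before the first step
model.reset_rl()  # remove KV Cache
done = False
while not done:
    action = model.get_next_action(processor, **observation, reward=reward, action_space=env.action_space)
    observation, reward, termined, truncated, info = env.step(action)
    done = termined or truncated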

20 changes: 16 additions & 4 deletions scripts/train_gia.py
@@ -68,12 +68,15 @@ class DataTrainingArguments:


LOSS_WEIGHTS = {
"mujoco-pendulum": 20.0,
"mujoco-doublependulum": 10.0,
**{task: 10.0 for task in TASK_NAME_TO_ENV_ID.keys() if task.startswith("mujoco")},
**{task: 50.0 for task in TASK_NAME_TO_ENV_ID.keys() if task.startswith("metaworld")},
"mujoco-pendulum": 50.0,
"mujoco-doublependulum": 20.0,
}
SAMPLE_WEIGHTS = {
# "oscar": 10.0,
# "conceptual_caption": 10.0,
"conceptual-captions": 10.0,
"oscar": 10.0,
"wikipedia": 10.0,
}

os.environ["WANDB_ENTITY"] = "gia-project"
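
How these dictionaries are consumed is outside this diff; below is a hedged, self-contained sketch of the usual pattern (per-task loss scaling and sampling probabilities), where the tasks and per_task_loss values are made up for illustration and the actual wiring in train_gia.py may differ:

LOSS_WEIGHTS = {"mujoco-pendulum": 50.0, "mujoco-doublependulum": 20.0}
SAMPLE_WEIGHTS = {"wikipedia": 10.0, "oscar": 10.0, "conceptual-captions": 10.0}

tasks = ["mujoco-pendulum", "wikipedia", "atari-pong"]
per_task_loss = {"mujoco-pendulum": 0.8, "wikipedia": 2.1, "atari-pong": 1.3}

# Unlisted tasks keep a neutral loss weight of 1.0.
total_loss = sum(LOSS_WEIGHTS.get(task, 1.0) * per_task_loss[task] for task in tasks)

# Sample weights renormalized into sampling probabilities (wikipedia drawn ~10x more often).
weights = [SAMPLE_WEIGHTS.get(task, 1.0) for task in tasks]
probabilities = [w / sum(weights) for w in weights]
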
@@ -147,6 +150,15 @@ def main():
            dataset = dataset_dict[task][split]
            column_names = set(dataset.column_names)  # need to be done here because this info is lost after the map
            dataset = dataset.filter(lambda example: example.get("rewards") != [])
+
+            # Add an initial 0 reward and remove the last reward
+            def add_initial_reward(example):
+                if "rewards" in example:
+                    example["rewards"] = [0.0] + example["rewards"][:-1]
+                return example
+
+            dataset = dataset.map(add_initial_reward)
+
            # We've shown that reducing the sequence length for atari doesn't impact performance but allows for a
            # larger global batch size
            max_length = 64 if task.startswith("atari") else None
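
A toy run of the map added above, showing the resulting rewards column; the episode data is made up and the datasets library is assumed to be installed:

from datasets import Dataset


def add_initial_reward(example):
    # Same function as in the diff: prepend a 0 reward and drop the last one.
    if "rewards" in example:
        example["rewards"] = [0.0] + example["rewards"][:-1]
    return example


dataset = Dataset.from_dict({"rewards": [[1.0, 2.0, 3.0]]})  # one toy episode with three steps
dataset = dataset.map(add_initial_reward)
print(dataset[0]["rewards"])  # [0.0, 1.0, 2.0]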
