Commit 20693f7 ("first version")
lgan31 committed Jun 18, 2024
1 parent 0b84ed6

Showing 4 changed files with 31 additions and 27 deletions.
research/evaluator.py: 5 additions & 2 deletions

@@ -10,11 +10,14 @@
 def main():
     # Set model types
     model_type = 'mlp'
+    # model_type = 'heterogeneous_gnn'
 
     # Give path to checkpoint
     # ================================= FILL THESE IN ===================================
-    model_directory_name = 'mlp-Courtney-Davis'  # Ex: 'gnn-Jared-Wright'
-    ckpt_file_name = 'epoch=249-val_MSE_loss=173.60436.ckpt'  # Ex: 'epoch=0-val_loss=6544.70.ckpt'
+    model_directory_name = 'mlp-Charles-Voorhees'  # Ex: 'gnn-Jared-Wright'
+    ckpt_file_name = 'epoch=89-val_MSE_loss=193.82160.ckpt'  # Ex: 'epoch=0-val_loss=6544.70.ckpt'
+    # model_directory_name = 'heterogeneous_gnn-Mark-Wallace'  # Ex: 'gnn-Jared-Wright'
+    # ckpt_file_name = 'epoch=109-val_MSE_loss=90.57379.ckpt'  # Ex: 'epoch=0-val_loss=6544.70.ckpt'
     # ===================================================================================
     path_to_checkpoint = Path(
         Path('.').parent, 'models', model_directory_name, ckpt_file_name)
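
For reference, a minimal standalone sketch of how this path resolves and how a
checkpoint would typically be restored. The repository's LightningModule class
is not shown in this hunk, so the load call is only indicated with an assumed
name:

from pathlib import Path

# Resolves to models/mlp-Charles-Voorhees/epoch=89-val_MSE_loss=193.82160.ckpt
path_to_checkpoint = Path(Path('.').parent, 'models',
                          'mlp-Charles-Voorhees',
                          'epoch=89-val_MSE_loss=193.82160.ckpt')
# model = SomeLightningModule.load_from_checkpoint(str(path_to_checkpoint))  # class name assumed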
research/train.py: 1 addition & 1 deletion

@@ -14,7 +14,7 @@ def main():
     path_to_quad_sdk_15Flipped = Path(
         Path('.').parent, 'datasets', 'QuadSDK-A1Speed1.5FlippedOver').absolute()
 
-    model_type = 'mlp'
+    model_type = 'gnn'
 
     # Initialize the datasets
     dataset_05 = QuadSDKDataset_A1Speed0_5(
src/grfgnn/datasets.py: 17 additions & 17 deletions

@@ -55,7 +55,7 @@ def __init__(self,
         # The longer the history, technically the lower number of total dataset entries
         # we can use, and thus a lower dataset length.
         self.history_length = history_length
-        self.length = int(data[0]) - (self.history_length*10 - 10)
+        self.length = int(data[0]) - (self.history_length - 1)
         if self.length <= 0:
             raise ValueError(
                 "Dataset has too few entries for the provided 'history_length'."
@@ -481,7 +481,7 @@ def get_helper_mlp(self, idx):

         # Load the rosbag information
         for i in range(0, self.history_length):
-            lin_acc, ang_vel, positions, velocities, torques, ground_truth_labels = self.load_data_sorted(self.first_index + idx + i*10)
+            lin_acc, ang_vel, positions, velocities, torques, ground_truth_labels = self.load_data_sorted(self.first_index + idx + i)
             tensor = torch.tensor((lin_acc + ang_vel + positions + velocities + torques), dtype=torch.float64).unsqueeze(0)
             if x is None:
                 x = tensor
@@ -491,18 +491,18 @@
         x = torch.flatten(torch.transpose(x, 0, 1), 0, 1)
 
         # Create the ground truth labels
-        lin_acc, ang_vel, positions, velocities, torques, ground_truth_labels = self.load_data_sorted(self.first_index + idx + self.history_length*10 - 10)
+        lin_acc, ang_vel, positions, velocities, torques, ground_truth_labels = self.load_data_sorted(self.first_index + idx + self.history_length - 1)
         y = torch.tensor(ground_truth_labels, dtype=torch.float64)
         return x, y
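
The transpose-then-flatten above orders the MLP input by feature rather than
by timestep: all history_length values of feature 0 come first, then feature
1, and so on. A standalone sketch of that ordering (shapes assumed):

import torch

H, F = 2, 3                                   # history length, features per sample
rows = [torch.tensor([t * 10.0 + f for f in range(F)]) for t in range(H)]
x = torch.stack(rows)                         # shape (H, F); row t = sample t
flat = torch.flatten(torch.transpose(x, 0, 1), 0, 1)
print(flat)                                   # tensor([ 0., 10.,  1., 11.,  2., 12.])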

     def get_helper_gnn(self, idx):
         # Create a node feature matrix
-        x = torch.ones((self.robotGraph.get_num_nodes(), 4 * self.history_length), dtype=torch.float64)
+        x = torch.ones((self.robotGraph.get_num_nodes(), 3 * self.history_length), dtype=torch.float64)
 
         # For each dataset entry we include in the history
         for j in range(0, self.history_length):
             # Load the data for this entry
-            lin_acc, ang_vel, positions, velocities, torques, z_grfs, ang_acc, joint_acc = self.load_data_sorted(self.first_index + idx + j)
+            lin_acc, ang_vel, positions, velocities, torques, z_grfs = self.load_data_sorted(self.first_index + idx + j)
 
             # For each joint specified
             for i, urdf_node_name in enumerate(self.joint_nodes_for_attributes):
@@ -513,16 +513,16 @@
                 # Add the features to x matrix
                 x[node_index, 0*self.history_length+j] = positions[i]
                 x[node_index, 1*self.history_length+j] = velocities[i]
-                x[node_index, 2*self.history_length+j] = joint_acc[i]
-                x[node_index, 3*self.history_length+j] = torques[i]
+                # x[node_index, 2*self.history_length+j] = joint_acc[i]
+                x[node_index, 2*self.history_length+j] = torques[i]
 
         # Create the edge matrix
         self.edge_matrix = self.robotGraph.get_edge_index_matrix()
         self.edge_matrix_tensor = torch.tensor(self.edge_matrix,
                                                dtype=torch.long)
 
         # Create the labels
-        lin_acc, ang_vel, positions, velocities, torques, z_grfs, ang_acc, joint_acc = self.load_data_sorted(
+        lin_acc, ang_vel, positions, velocities, torques, z_grfs = self.load_data_sorted(
             self.first_index + idx + self.history_length - 1)
         y = torch.tensor(z_grfs, dtype=torch.float64)
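
After this change each node row holds 3 blocks of history_length columns
(positions, velocities, torques), with joint_acc dropped and torques moving
from block 3 to block 2. A standalone sketch of the layout (sizes assumed):

import torch

num_nodes, H = 12, 4                     # e.g. 12 joint nodes, history of 4
x = torch.ones((num_nodes, 3 * H), dtype=torch.float64)
node_index, j = 5, 2                     # hypothetical node and history step
position, velocity, torque = 0.1, 0.2, 0.3
x[node_index, 0 * H + j] = position      # columns 0..H-1
x[node_index, 1 * H + j] = velocity      # columns H..2H-1
x[node_index, 2 * H + j] = torque        # columns 2H..3H-1 (was joint_acc's block)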

@@ -562,21 +562,21 @@ def get_helper_heterogeneous_gnn(self, idx):
             'foot'].edge_attr = torch.tensor(jf_attr, dtype=torch.float64)
 
         # Save the labels and number of nodes
-        lin_acc, ang_vel, positions, velocities, torques, z_grfs, ang_acc, joint_acc = self.load_data_sorted(
+        lin_acc, ang_vel, positions, velocities, torques, z_grfs = self.load_data_sorted(
             self.first_index + idx + self.history_length - 1)
         data.y = torch.tensor(z_grfs, dtype=torch.float64)
         data.num_nodes = self.robotGraph.get_num_nodes()
 
         # Create the feature matrices
         number_nodes = self.robotGraph.get_num_of_each_node_type()
-        base_x = torch.ones((number_nodes[0], 9 * self.history_length), dtype=torch.float64)
-        joint_x = torch.ones((number_nodes[1], 4 * self.history_length), dtype=torch.float64)
+        base_x = torch.ones((number_nodes[0], 6 * self.history_length), dtype=torch.float64)
+        joint_x = torch.ones((number_nodes[1], 3 * self.history_length), dtype=torch.float64)
         foot_x = torch.ones((number_nodes[2], 1), dtype=torch.float64)
 
         # For each dataset entry we include in the history
         for j in range(0, self.history_length):
             # Load the data for this entry
-            lin_acc, ang_vel, positions, velocities, torques, z_grfs, ang_acc, joint_acc = self.load_data_sorted(self.first_index + idx + j)
+            lin_acc, ang_vel, positions, velocities, torques, z_grfs = self.load_data_sorted(self.first_index + idx + j)
 
             # For each joint specified
             for i, urdf_node_name in enumerate(self.joint_nodes_for_attributes):
@@ -587,8 +587,8 @@ def get_helper_heterogeneous_gnn(self, idx):
                 # Add the features to x matrix
                 joint_x[node_index, 0*self.history_length+j] = positions[i]
                 joint_x[node_index, 1*self.history_length+j] = velocities[i]
-                joint_x[node_index, 2*self.history_length+j] = joint_acc[i]
-                joint_x[node_index, 3*self.history_length+j] = torques[i]
+                # joint_x[node_index, 2*self.history_length+j] = joint_acc[i]
+                joint_x[node_index, 2*self.history_length+j] = torques[i]
 
             # For each base specified (should be 1)
             for urdf_node_name in self.base_nodes_for_attributes:
@@ -603,9 +603,9 @@
                 base_x[node_index, 3*self.history_length+j] = ang_vel[0]
                 base_x[node_index, 4*self.history_length+j] = ang_vel[1]
                 base_x[node_index, 5*self.history_length+j] = ang_vel[2]
-                base_x[node_index, 6*self.history_length+j] = ang_acc[0]
-                base_x[node_index, 7*self.history_length+j] = ang_acc[1]
-                base_x[node_index, 8*self.history_length+j] = ang_acc[2]
+                # base_x[node_index, 6*self.history_length+j] = ang_acc[0]
+                # base_x[node_index, 7*self.history_length+j] = ang_acc[1]
+                # base_x[node_index, 8*self.history_length+j] = ang_acc[2]
 
         # Save the matrices into the HeteroData object
         data['base'].x = base_x
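
The per-type widths after this change: 6 * history_length for the base
(linear acceleration + angular velocity, with the three ang_acc channels
commented out), 3 * history_length per joint, and one placeholder per foot.
A minimal standalone sketch using torch_geometric's HeteroData (node counts
assumed for a quadruped):

import torch
from torch_geometric.data import HeteroData

H = 4                                                           # assumed history length
data = HeteroData()
data['base'].x = torch.ones((1, 6 * H), dtype=torch.float64)    # 1 base node
data['joint'].x = torch.ones((12, 3 * H), dtype=torch.float64)  # 12 joint nodes
data['foot'].x = torch.ones((4, 1), dtype=torch.float64)        # 4 foot nodes
data.y = torch.zeros(4, dtype=torch.float64)                    # one z-GRF per foot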
src/grfgnn/gnnLightning.py: 8 additions & 7 deletions

@@ -81,14 +81,15 @@ def __init__(self, hidden_channels, edge_dim, num_layers, out_channels,
         # Create the final linear layer (Decoder) -> Just for nodes of type "foot"
         # Meant to calculate the final GRF values
         self.decoder = Linear(hidden_channels, out_channels)
+        self.silu = nn.SiLU()
 
     def forward(self, x_dict, edge_index_dict):
         x_dict = self.encoder(x_dict)
-        x_dict = {key: x.relu() for key, x in x_dict.items()}
+        x_dict = {key: self.silu(x) for key, x in x_dict.items()}
         for conv in self.convs:
             # TODO: Does the RELU actually work?
             x_dict = conv(x_dict, edge_index_dict)
-            x_dict = {key: x.relu() for key, x in x_dict.items()}
+            x_dict = {key: self.silu(x) for key, x in x_dict.items()}
         return self.decoder(x_dict['foot'])
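
SiLU (x * sigmoid(x)) is smooth and keeps a nonzero gradient for negative
inputs, unlike ReLU. A standalone sketch of the per-node-type application
pattern used in this forward pass (shapes assumed):

import torch
import torch.nn as nn

silu = nn.SiLU()
x_dict = {'base': torch.randn(1, 8), 'joint': torch.randn(12, 8)}
x_dict = {key: silu(x) for key, x in x_dict.items()}  # replaces x.relu()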


@@ -194,18 +195,18 @@ def __init__(self, in_channels, hidden_channels, num_layers, batch_size,
raise ValueError("num_layers must be 1 or greater")
elif num_layers is 1:
modules.append(nn.Linear(in_channels, 4))
modules.append(nn.ReLU())
modules.append(nn.SiLU())
elif num_layers is 2:
modules.append(nn.Linear(in_channels, hidden_channels))
modules.append(nn.ReLU())
modules.append(nn.SiLU())
modules.append(nn.Linear(hidden_channels, 4))
modules.append(nn.ReLU())
modules.append(nn.SiLU())
else:
modules.append(nn.Linear(in_channels, hidden_channels))
modules.append(nn.ReLU())
modules.append(nn.SiLU())
for i in range(0, num_layers - 2):
modules.append(nn.Linear(hidden_channels, hidden_channels))
modules.append(nn.ReLU())
modules.append(nn.SiLU())
modules.append(nn.Linear(hidden_channels, 4))
#modules.append(nn.ReLU())
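
For num_layers >= 3 this assembles Linear-SiLU pairs with no activation on
the 4 output GRF channels. A standalone sketch of the generic branch
(channel sizes assumed):

import torch.nn as nn

def make_mlp(in_channels: int, hidden_channels: int, num_layers: int) -> nn.Sequential:
    modules = [nn.Linear(in_channels, hidden_channels), nn.SiLU()]
    for _ in range(num_layers - 2):
        modules += [nn.Linear(hidden_channels, hidden_channels), nn.SiLU()]
    modules.append(nn.Linear(hidden_channels, 4))  # one GRF per foot, no final activation
    return nn.Sequential(*modules)

print(make_mlp(54, 128, 3))  # Linear-SiLU-Linear-SiLU-Linear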

