Commit small_speed_improvements
elephaint committed Sep 26, 2024
1 parent b9e1c8e commit 536e8f6
Showing 11 changed files with 26 additions and 30 deletions.
4 changes: 2 additions & 2 deletions action_files/test_models/src/multivariate_models.py
@@ -10,7 +10,7 @@
 from neuralforecast.models.tsmixer import TSMixer
 from neuralforecast.models.tsmixerx import TSMixerx
 from neuralforecast.models.itransformer import iTransformer
-from neuralforecast.models.stemgnn import StemGNN
+# from neuralforecast.models.stemgnn import StemGNN
 from neuralforecast.models.mlpmultivariate import MLPMultivariate
 from neuralforecast.models.timemixer import TimeMixer

@@ -30,7 +30,7 @@ def main(dataset: str = 'multivariate', group: str = 'ETTm2') -> None:
         TSMixer(h=horizon, n_series=7, input_size=2 * horizon, loss=MAE(), dropout=0.0, max_steps=1000, val_check_steps=500),
         TSMixerx(h=horizon, n_series=7, input_size=2*horizon, loss=MAE(), dropout=0.0, max_steps=1000, val_check_steps=500),
         iTransformer(h=horizon, n_series=7, input_size=2 * horizon, loss=MAE(), dropout=0.0, max_steps=1000, val_check_steps=500),
-        StemGNN(h=horizon, n_series=7, input_size=2*horizon, loss=MAE(), dropout_rate=0.0, max_steps=1000, val_check_steps=500),
+        # StemGNN(h=horizon, n_series=7, input_size=2*horizon, loss=MAE(), dropout_rate=0.0, max_steps=1000, val_check_steps=500),
         MLPMultivariate(h=horizon, n_series=7, input_size=2*horizon, loss=MAE(), max_steps=1000, val_check_steps=500),
         TimeMixer(h=horizon, n_series=7, input_size=2*horizon, loss=MAE(), dropout=0.0, max_steps=1000, val_check_steps=500)
     ]
2 changes: 1 addition & 1 deletion nbs/common.base_model.ipynb
@@ -441,7 +441,7 @@
 "        if self.val_size == 0:\n",
 "            return\n",
 "        losses = torch.stack(self.validation_step_outputs)\n",
-"        avg_loss = losses.mean().item()\n",
+"        avg_loss = losses.mean().detach()\n",
 "        self.log(\n",
 "            \"ptl/val_loss\",\n",
 "            avg_loss,\n",
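Note on the .item() -> .detach() pattern applied throughout this commit: .item() copies the scalar back to the host, which on CUDA forces a blocking GPU-to-CPU synchronization on every step, while .detach() only severs the autograd graph and keeps the value on-device. Lightning's self.log accepts tensors as well as Python numbers, so the transfer can be deferred instead of paid inside each training or validation step. A minimal standalone sketch (the variable names are illustrative, not from the repository):

import torch

loss = (torch.randn(512, requires_grad=True) ** 2).mean()

x = loss.item()    # Python float: device-to-host copy; blocks until the GPU finishes
y = loss.detach()  # 0-dim tensor: no copy, no sync, gradient history dropped

One side effect worth noting: train_trajectories now stores zero-dim tensors rather than Python floats, so downstream consumers may need float(t) to recover plain numbers.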
6 changes: 3 additions & 3 deletions nbs/common.base_multivariate.ipynb
@@ -390,12 +390,12 @@
 "\n",
 "        self.log(\n",
 "            'train_loss',\n",
-"            loss.item(),\n",
+"            loss.detach(),\n",
 "            batch_size=outsample_y.size(0),\n",
 "            prog_bar=True,\n",
 "            on_epoch=True,\n",
 "        )\n",
-"        self.train_trajectories.append((self.global_step, loss.item()))\n",
+"        self.train_trajectories.append((self.global_step, loss.detach()))\n",
 "        return loss\n",
 "\n",
 "    def validation_step(self, batch, batch_idx):\n",
@@ -440,7 +440,7 @@
 "\n",
 "        self.log(\n",
 "            'valid_loss',\n",
-"            valid_loss.item(),\n",
+"            valid_loss.detach(),\n",
 "            batch_size=outsample_y.size(0),\n",
 "            prog_bar=True,\n",
 "            on_epoch=True,\n",
6 changes: 3 additions & 3 deletions nbs/common.base_recurrent.ipynb
@@ -365,12 +365,12 @@
 "\n",
 "        self.log(\n",
 "            'train_loss',\n",
-"            loss.item(),\n",
+"            loss.detach(),\n",
 "            batch_size=outsample_y.size(0),\n",
 "            prog_bar=True,\n",
 "            on_epoch=True,\n",
 "        )\n",
-"        self.train_trajectories.append((self.global_step, loss.item()))\n",
+"        self.train_trajectories.append((self.global_step, loss.detach()))\n",
 "        return loss\n",
 "\n",
 "    def validation_step(self, batch, batch_idx):\n",
@@ -438,7 +438,7 @@
 "\n",
 "        self.log(\n",
 "            'valid_loss',\n",
-"            valid_loss.item(),\n",
+"            valid_loss.detach(),\n",
 "            batch_size=outsample_y.size(0),\n",
 "            prog_bar=True,\n",
 "            on_epoch=True,\n",
6 changes: 3 additions & 3 deletions nbs/common.base_windows.ipynb
@@ -447,12 +447,12 @@
 "\n",
 "        self.log(\n",
 "            'train_loss',\n",
-"            loss.item(),\n",
+"            loss.detach(),\n",
 "            batch_size=outsample_y.size(0),\n",
 "            prog_bar=True,\n",
 "            on_epoch=True,\n",
 "        )\n",
-"        self.train_trajectories.append((self.global_step, loss.item()))\n",
+"        self.train_trajectories.append((self.global_step, loss.detach()))\n",
 "        return loss\n",
 "\n",
 "    def _compute_valid_loss(self, outsample_y, output, outsample_mask, temporal_cols, y_idx):\n",
@@ -533,7 +533,7 @@
 "\n",
 "        self.log(\n",
 "            'valid_loss',\n",
-"            valid_loss.item(),\n",
+"            valid_loss.detach(),\n",
 "            batch_size=batch_size,\n",
 "            prog_bar=True,\n",
 "            on_epoch=True,\n",
6 changes: 2 additions & 4 deletions nbs/losses.pytorch.ipynb
@@ -105,9 +105,7 @@
 "    Auxiliary funtion to handle divide by 0\n",
 "    \"\"\"\n",
 "    div = a / b\n",
-"    div[div != div] = 0.0\n",
-"    div[div == float('inf')] = 0.0\n",
-"    return div"
+"    return torch.nan_to_num(div, nan=0.0, posinf=0.0, neginf=0.0)"
 ]
 },
 {
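The torch.nan_to_num call replaces the two boolean-mask writes with a single operation, and it also zeroes -inf, which the old code (checking div != div for NaN and div == float('inf') for +inf only) let through. A standalone sketch of the behaviour:

import torch

a = torch.tensor([1.0, -1.0, 0.0, 2.0])
b = torch.tensor([0.0,  0.0, 0.0, 4.0])

div = a / b  # [inf, -inf, nan, 0.5]
print(torch.nan_to_num(div, nan=0.0, posinf=0.0, neginf=0.0))
# tensor([0.0000, 0.0000, 0.0000, 0.5000])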
@@ -1412,7 +1410,7 @@
 "    if (loc is not None) and (scale is not None):\n",
 "        mean = (mean * scale) + loc\n",
 "        tscale = (tscale + eps) * scale\n",
-"    df = 2.0 + F.softplus(df)\n",
+"    df = 3.0 + F.softplus(df)\n",
 "    return (df, mean, tscale)\n",
 "\n",
 "def normal_domain_map(input: torch.Tensor):\n",
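On the Student's t domain map: raising the softplus floor on the degrees of freedom from 2.0 to 3.0 keeps df strictly above 3, away from the df -> 2 boundary where the distribution's variance, scale^2 * df / (df - 2), diverges. A standalone sketch of the effect (the raw network output value is illustrative):

import torch
import torch.nn.functional as F
from torch.distributions import StudentT

raw = torch.tensor(-10.0)  # pre-activation output; softplus(raw) ~ 4.5e-5
for floor in (2.0, 3.0):
    df = floor + F.softplus(raw)
    dist = StudentT(df=df, loc=0.0, scale=1.0)
    print(f"floor={floor}: df={df.item():.5f}, variance={dist.variance.item():.1f}")
# floor=2.0: variance ~ 44000 (explodes as df approaches 2)
# floor=3.0: variance ~ 3.0   (bounded above by 3 * scale^2)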
2 changes: 1 addition & 1 deletion neuralforecast/common/_base_model.py
@@ -421,7 +421,7 @@ def on_validation_epoch_end(self):
         if self.val_size == 0:
             return
         losses = torch.stack(self.validation_step_outputs)
-        avg_loss = losses.mean().item()
+        avg_loss = losses.mean().detach()
         self.log(
             "ptl/val_loss",
             avg_loss,
6 changes: 3 additions & 3 deletions neuralforecast/common/_base_multivariate.py
@@ -389,12 +389,12 @@ def training_step(self, batch, batch_idx):
 
         self.log(
             "train_loss",
-            loss.item(),
+            loss.detach(),
             batch_size=outsample_y.size(0),
             prog_bar=True,
             on_epoch=True,
         )
-        self.train_trajectories.append((self.global_step, loss.item()))
+        self.train_trajectories.append((self.global_step, loss.detach()))
         return loss
 
     def validation_step(self, batch, batch_idx):
@@ -456,7 +456,7 @@ def validation_step(self, batch, batch_idx):
 
         self.log(
             "valid_loss",
-            valid_loss.item(),
+            valid_loss.detach(),
             batch_size=outsample_y.size(0),
             prog_bar=True,
             on_epoch=True,
6 changes: 3 additions & 3 deletions neuralforecast/common/_base_recurrent.py
@@ -349,12 +349,12 @@ def training_step(self, batch, batch_idx):
 
         self.log(
             "train_loss",
-            loss.item(),
+            loss.detach(),
             batch_size=outsample_y.size(0),
             prog_bar=True,
             on_epoch=True,
         )
-        self.train_trajectories.append((self.global_step, loss.item()))
+        self.train_trajectories.append((self.global_step, loss.detach()))
         return loss
 
     def validation_step(self, batch, batch_idx):
@@ -447,7 +447,7 @@ def validation_step(self, batch, batch_idx):
 
         self.log(
             "valid_loss",
-            valid_loss.item(),
+            valid_loss.detach(),
             batch_size=outsample_y.size(0),
             prog_bar=True,
             on_epoch=True,
6 changes: 3 additions & 3 deletions neuralforecast/common/_base_windows.py
@@ -440,12 +440,12 @@ def training_step(self, batch, batch_idx):
 
         self.log(
             "train_loss",
-            loss.item(),
+            loss.detach(),
             batch_size=outsample_y.size(0),
             prog_bar=True,
             on_epoch=True,
         )
-        self.train_trajectories.append((self.global_step, loss.item()))
+        self.train_trajectories.append((self.global_step, loss.detach()))
         return loss
 
     def _compute_valid_loss(
@@ -551,7 +551,7 @@ def validation_step(self, batch, batch_idx):
 
         self.log(
             "valid_loss",
-            valid_loss.item(),
+            valid_loss.detach(),
             batch_size=batch_size,
             prog_bar=True,
             on_epoch=True,
6 changes: 2 additions & 4 deletions neuralforecast/losses/pytorch.py
@@ -35,9 +35,7 @@ def _divide_no_nan(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
     Auxiliary funtion to handle divide by 0
     """
     div = a / b
-    div[div != div] = 0.0
-    div[div == float("inf")] = 0.0
-    return div
+    return torch.nan_to_num(div, nan=0.0, posinf=0.0, neginf=0.0)
 
 # %% ../../nbs/losses.pytorch.ipynb 7
 def _weighted_mean(losses, weights):
@@ -825,7 +823,7 @@ def student_scale_decouple(output, loc=None, scale=None, eps: float = 0.1):
     if (loc is not None) and (scale is not None):
         mean = (mean * scale) + loc
         tscale = (tscale + eps) * scale
-    df = 2.0 + F.softplus(df)
+    df = 3.0 + F.softplus(df)
     return (df, mean, tscale)


