eliminate samples copying in DeepARNative and DeepState
Egor Baturin committed Oct 28, 2024
1 parent 327da67 commit a23e8f2
Showing 3 changed files with 25 additions and 25 deletions.
19 changes: 11 additions & 8 deletions etna/models/nn/deepar_native/deepar.py
@@ -132,6 +132,10 @@ def forward(self, x: DeepARNativeBatch, *args, **kwargs): # type: ignore
decoder_categorical = x["decoder_categorical"] # each (batch_size, decoder_length, 1)
decoder_target = x["decoder_target"].float() # (batch_size, decoder_length, 1)
decoder_length = decoder_real.shape[1]
+ weights = x["weight"]

+ encoder_real[:, :, 0] = encoder_real[:, :, 0] / weights.unsqueeze(1)
+ decoder_real[:, :, 0] = decoder_real[:, :, 0] / weights.unsqueeze(1)

encoder_embeddings = self.embedding(encoder_categorical) if self.embedding is not None else torch.Tensor()
decoder_embeddings = self.embedding(decoder_categorical) if self.embedding is not None else torch.Tensor()
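Note: for context, a minimal standalone sketch (not part of the diff) of the broadcasting the new scaling lines rely on. All shapes follow the comments above; the tensors here are random stand-ins.

import torch

batch_size, length, input_size = 4, 10, 3
encoder_real = torch.randn(batch_size, length, input_size)
weights = 1 + torch.rand(batch_size)  # per-sample scale, shape (batch_size,)

# encoder_real[:, :, 0] has shape (batch_size, length); dividing by
# weights.unsqueeze(1), shape (batch_size, 1), broadcasts over the time axis.
encoder_real[:, :, 0] = encoder_real[:, :, 0] / weights.unsqueeze(1)
assert encoder_real.shape == (batch_size, length, input_size)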
@@ -191,7 +195,11 @@ def step(self, batch: DeepARNativeBatch, *args, **kwargs): # type: ignore
decoder_categorical = batch["decoder_categorical"] # each (batch_size, decoder_length, 1)
encoder_target = batch["encoder_target"].float() # (batch_size, encoder_length-1, 1)
decoder_target = batch["decoder_target"].float() # (batch_size, decoder_length, 1)
- weights = batch["weight"]
+ weights = batch["weight"] # (batch_size)

+ # scale target values at index 0
+ encoder_real[:, :, 0] = encoder_real[:, :, 0] / weights.unsqueeze(1)
+ decoder_real[:, :, 0] = decoder_real[:, :, 0] / weights.unsqueeze(1)

encoder_embeddings = self.embedding(encoder_categorical) if self.embedding is not None else torch.Tensor()
decoder_embeddings = self.embedding(decoder_categorical) if self.embedding is not None else torch.Tensor()
@@ -255,11 +263,10 @@ def _make(
return None

# Get shifted target and concatenate it with real values features
sample["decoder_real"] = values_real[start_idx + encoder_length : start_idx + total_sample_length].copy()
sample["decoder_real"] = values_real[start_idx + encoder_length : start_idx + total_sample_length]

# Get shifted target and concatenate it with real values features
sample["encoder_real"] = values_real[start_idx : start_idx + encoder_length].copy()
sample["encoder_real"] = sample["encoder_real"][1:]
sample["encoder_real"] = values_real[start_idx + 1 : start_idx + encoder_length]

for index, feature in enumerate(self.embedding_sizes.keys()):
sample["encoder_categorical"][feature] = values_categorical[index][
@@ -276,10 +283,6 @@ def _make(

sample["segment"] = segment
sample["weight"] = 1 + sample["encoder_target"].mean() if self.scale else 1
sample["encoder_real"][:, 0] = values_real[start_idx + 1 : start_idx + encoder_length, 0] / sample["weight"]
sample["decoder_real"][:, 0] = (
values_real[start_idx + encoder_length : start_idx + total_sample_length, 0] / sample["weight"]
)

return sample
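Note: the rationale for dropping the `.copy()` calls is that `_make` no longer mutates its slices. Previously the target channel was divided by the weight in place, which would have written through a NumPy view back into `values_real`; with scaling moved into `step`/`forward`, the slices can safely stay as views. A minimal sketch of that distinction, using a plain NumPy array:

import numpy as np

values = np.arange(10.0)

view = values[2:6]           # basic slicing returns a view; nothing is copied
copied = values[2:6].copy()  # an independent buffer

view[0] = -1.0               # writes through to `values`
assert values[2] == -1.0

copied[1] = 99.0             # leaves `values` untouched
assert values[3] == 3.0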

26 changes: 12 additions & 14 deletions etna/models/nn/deepstate/deepstate.py
@@ -4,6 +4,7 @@
from typing import Optional
from typing import Tuple

+ import numpy as np
import pandas as pd
from typing_extensions import TypedDict

@@ -117,9 +118,9 @@ def step(self, batch: DeepStateBatch, *args, **kwargs): # type: ignore
:
loss, true_target, prediction_target
"""
- encoder_real = batch["encoder_real"] # (batch_size, seq_length, input_size)
+ encoder_real = batch["encoder_real"].float() # (batch_size, seq_length, input_size)
encoder_categorical = batch["encoder_categorical"] # each (batch_size, seq_length, 1)
- targets = batch["encoder_target"] # (batch_size, seq_length, 1)
+ targets = batch["encoder_target"].float() # (batch_size, seq_length, 1)
seq_length = targets.shape[1]
datetime_index = batch["datetime_index"].permute(1, 0, 2)[
:, :, :seq_length
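Note: the new `.float()` casts follow from the make_samples change below. Assuming these samples reach the model through PyTorch's default collation (the `default_collate` import requires torch 1.11+), float64 NumPy arrays collate into float64 tensors, while the module parameters are float32. A minimal sketch:

import numpy as np
import torch
from torch.utils.data import default_collate

# Two samples as make_samples now produces them: plain NumPy arrays.
samples = [{"encoder_target": np.arange(3, dtype=np.float64)} for _ in range(2)]
batch = default_collate(samples)

assert batch["encoder_target"].dtype == torch.float64  # NumPy's default dtype survives
assert batch["encoder_target"].float().dtype == torch.float32  # hence the casts above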
@@ -159,11 +160,11 @@ def forward(self, x: DeepStateBatch, *args, **kwargs): # type: ignore
:
forecast with shape (batch_size, decoder_length, 1)
"""
- encoder_real = x["encoder_real"] # (batch_size, seq_length, input_size)
+ encoder_real = x["encoder_real"].float() # (batch_size, seq_length, input_size)
encoder_categorical = x["encoder_categorical"] # each (batch_size, seq_length, 1)
seq_length = encoder_real.shape[1]
- targets = x["encoder_target"][:, :seq_length] # (batch_size, seq_length, 1)
- decoder_real = x["decoder_real"] # (batch_size, horizon, input_size)
+ targets = x["encoder_target"][:, :seq_length].float() # (batch_size, seq_length, 1)
+ decoder_real = x["decoder_real"].float() # (batch_size, horizon, input_size)
decoder_categorical = x["decoder_categorical"] # each (batch_size, horizon, 1)
datetime_index_train = x["datetime_index"].permute(1, 0, 2)[
:, :, :seq_length
@@ -213,26 +214,23 @@ def forward(self, x: DeepStateBatch, *args, **kwargs): # type: ignore
def make_samples(self, df: pd.DataFrame, encoder_length: int, decoder_length: int) -> Iterator[dict]:
"""Make samples from segment DataFrame."""
values_real = df.drop(columns=["target", "segment", "timestamp"] + list(self.embedding_sizes.keys())).values
- values_real = torch.from_numpy(values_real).float()

# Categories that were not seen during `fit` will be filled with new category
for feature in self.embedding_sizes:
df[feature] = df[feature].astype(float).fillna(self.embedding_sizes[feature][0])

# Columns in `values_categorical` are in the same order as in `embedding_sizes`
- values_categorical = torch.from_numpy(df[self.embedding_sizes.keys()].values.T)
+ values_categorical = df[self.embedding_sizes.keys()].values.T

- values_datetime = torch.from_numpy(self.ssm.generate_datetime_index(df["timestamp"]))
- values_datetime = values_datetime.to(torch.int64)
+ values_datetime = self.ssm.generate_datetime_index(df["timestamp"]).astype(int)
values_target = df["target"].values
- values_target = torch.from_numpy(values_target).float()
segment = df["segment"].values[0]

def _make(
- values_target: torch.Tensor,
- values_real: torch.Tensor,
- values_categorical: torch.Tensor,
- values_datetime: torch.Tensor,
+ values_target: np.ndarray,
+ values_real: np.ndarray,
+ values_categorical: np.ndarray,
+ values_datetime: np.ndarray,
segment: str,
start_idx: int,
encoder_length: int,
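Note: with `torch.from_numpy` removed, `_make` can hand out zero-copy NumPy views and defer tensor conversion to collation. A hypothetical, condensed `_make` (names mirror the diff, body reduced to one field) showing that no buffer is allocated:

import numpy as np

def _make(values_real: np.ndarray, start_idx: int, encoder_length: int) -> dict:
    # No .copy() and no torch.from_numpy: the slice below is a NumPy view.
    return {"encoder_real": values_real[start_idx + 1 : start_idx + encoder_length]}

values_real = np.random.rand(20, 3)
sample = _make(values_real, start_idx=0, encoder_length=8)
assert sample["encoder_real"].base is values_real  # shares memory; nothing copied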
5 changes: 2 additions & 3 deletions tests/test_models/test_nn/deepar_native/test_deepar_native.py
@@ -105,12 +105,11 @@ def test_deepar_make_samples(df_name, scale, weights, cat_columns, request):
num_samples_check = 2
df["target_shifted"] = df["target"].shift(1)
for i in range(num_samples_check):
df[f"target_shifted_scaled_{i}"] = df["target_shifted"] / weights[i]
expected_sample = {
"encoder_real": df[[f"target_shifted_scaled_{i}", "regressor_float", "regressor_int"]]
"encoder_real": df[["target_shifted", "regressor_float", "regressor_int"]]
.iloc[1 + i : encoder_length + i]
.values,
"decoder_real": df[[f"target_shifted_scaled_{i}", "regressor_float", "regressor_int"]]
"decoder_real": df[["target_shifted", "regressor_float", "regressor_int"]]
.iloc[encoder_length + i : encoder_length + decoder_length + i]
.values,
"encoder_categorical": {
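Note: the test drops the `target_shifted_scaled_{i}` columns because `make_samples` now leaves the target channel unscaled and only records the weight; scaling is applied later in `step`/`forward`. Per the dataset code above, the recorded weight is `1 + encoder_target.mean()` when scaling is enabled. A tiny sketch of that expectation, with made-up values:

import numpy as np

encoder_target = np.array([[1.0], [2.0], [3.0]])
scale = True

# As in _make above: the sample stores the weight instead of scaled values.
weight = 1 + encoder_target.mean() if scale else 1
assert weight == 3.0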
