Commit 6cd66ae

Optimize memory usage in DeepStateModel and DeepARNativeModel by eliminating copying during making samples (#499)

* eliminate samples copying in DeepARNative and DeepState

* update changelog

* fix

* fix changelog

* add checks on copying arrays

---------

Co-authored-by: Egor Baturin <[email protected]>
egoriyaa and Egor Baturin authored Oct 29, 2024
1 parent 327da67 commit 6cd66ae
Showing 5 changed files with 38 additions and 26 deletions.
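The theme of all five diffs: `make_samples` in both models used to hand each training sample a `.copy()` of a slice of the segment's feature arrays, so memory grew linearly with the number of samples. Plain numpy slices are views that share one buffer per segment. A minimal sketch of the difference (illustrative shapes, not ETNA code):

```python
import numpy as np

# Hypothetical per-segment feature matrix.
values_real = np.random.rand(10_000, 3)
encoder_length = 200

# Old behaviour: every sample owns a fresh buffer.
samples_old = [values_real[i : i + encoder_length].copy() for i in range(100)]

# New behaviour: every sample is a view into the shared buffer.
samples_new = [values_real[i : i + encoder_length] for i in range(100)]

assert all(s.base is None for s in samples_old)         # independent copies
assert all(s.base is values_real for s in samples_new)  # shared memory
```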
CHANGELOG.md (2 changes: 1 addition & 1 deletion)
@@ -40,7 +40,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Fix working with `embedding_sizes` in `202-NN_examples` notebook ([#489](https://github.com/etna-team/etna/pull/489))
 - Disallow dropping target in `TSDataset.drop_features` ([#491](https://github.com/etna-team/etna/pull/491))
 - Optimize memory usage in `TFTNativeModel` by eliminating copying during making samples ([#494](https://github.com/etna-team/etna/pull/494))
--
+- Optimize memory usage in `DeepStateModel` and `DeepARNativeModel` by eliminating copying during making samples ([#499](https://github.com/etna-team/etna/pull/499))
 -
 -
 -
etna/models/nn/deepar_native/deepar.py (20 changes: 12 additions & 8 deletions)
@@ -132,6 +132,11 @@ def forward(self, x: DeepARNativeBatch, *args, **kwargs):  # type: ignore
 decoder_categorical = x["decoder_categorical"]  # each (batch_size, decoder_length, 1)
 decoder_target = x["decoder_target"].float()  # (batch_size, decoder_length, 1)
 decoder_length = decoder_real.shape[1]
+weights = x["weight"]
+
+# scale target values at index 0
+encoder_real[:, :, 0] = encoder_real[:, :, 0] / weights.unsqueeze(1)
+decoder_real[:, :, 0] = decoder_real[:, :, 0] / weights.unsqueeze(1)

 encoder_embeddings = self.embedding(encoder_categorical) if self.embedding is not None else torch.Tensor()
 decoder_embeddings = self.embedding(decoder_categorical) if self.embedding is not None else torch.Tensor()
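The new division relies on broadcasting: `weights` has shape `(batch_size,)`, and `unsqueeze(1)` reshapes it to `(batch_size, 1)` so it scales the `(batch_size, seq_length)` target slice row by row. A sketch with made-up shapes:

```python
import torch

batch_size, seq_length, input_size = 4, 10, 3
encoder_real = torch.rand(batch_size, seq_length, input_size)
weights = torch.rand(batch_size) + 1.0  # offset keeps the sketch away from zero division

# (batch_size, seq_length) / (batch_size, 1): each row scaled by its own weight.
encoder_real[:, :, 0] = encoder_real[:, :, 0] / weights.unsqueeze(1)
```

The division mutates the batch tensor in place; presumably this is safe because default collation has already stacked the numpy samples into a fresh tensor by the time `forward` or `step` runs.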
@@ -191,7 +196,11 @@ def step(self, batch: DeepARNativeBatch, *args, **kwargs):  # type: ignore
 decoder_categorical = batch["decoder_categorical"]  # each (batch_size, decoder_length, 1)
 encoder_target = batch["encoder_target"].float()  # (batch_size, encoder_length-1, 1)
 decoder_target = batch["decoder_target"].float()  # (batch_size, decoder_length, 1)
-weights = batch["weight"]
+weights = batch["weight"]  # (batch_size)
+
+# scale target values at index 0
+encoder_real[:, :, 0] = encoder_real[:, :, 0] / weights.unsqueeze(1)
+decoder_real[:, :, 0] = decoder_real[:, :, 0] / weights.unsqueeze(1)

 encoder_embeddings = self.embedding(encoder_categorical) if self.embedding is not None else torch.Tensor()
 decoder_embeddings = self.embedding(decoder_categorical) if self.embedding is not None else torch.Tensor()
@@ -255,11 +264,10 @@ def _make(
 return None

 # Get shifted target and concatenate it with real values features
-sample["decoder_real"] = values_real[start_idx + encoder_length : start_idx + total_sample_length].copy()
+sample["decoder_real"] = values_real[start_idx + encoder_length : start_idx + total_sample_length]

 # Get shifted target and concatenate it with real values features
-sample["encoder_real"] = values_real[start_idx : start_idx + encoder_length].copy()
-sample["encoder_real"] = sample["encoder_real"][1:]
+sample["encoder_real"] = values_real[start_idx + 1 : start_idx + encoder_length]

 for index, feature in enumerate(self.embedding_sizes.keys()):
     sample["encoder_categorical"][feature] = values_categorical[index][
@@ -276,10 +284,6 @@

 sample["segment"] = segment
 sample["weight"] = 1 + sample["encoder_target"].mean() if self.scale else 1
-sample["encoder_real"][:, 0] = values_real[start_idx + 1 : start_idx + encoder_length, 0] / sample["weight"]
-sample["decoder_real"][:, 0] = (
-    values_real[start_idx + encoder_length : start_idx + total_sample_length, 0] / sample["weight"]
-)

 return sample

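The `_make` change above folds the old copy-then-drop-first-row pattern into a single shifted slice, which numpy returns as a view; the weight scaling that used to write into the copied sample arrays (removed in the last hunk) would corrupt the shared segment data if applied to a view, which is why it moved onto the batch tensors in `forward`/`step`. A before/after sketch mirroring the diff:

```python
import numpy as np

values_real = np.arange(30, dtype=float).reshape(10, 3)  # toy segment data
start_idx, encoder_length = 0, 5

# Before: copy the slice, then drop the first row (an extra allocation).
old = values_real[start_idx : start_idx + encoder_length].copy()[1:]

# After: one basic slice, shifted by one, returned as a view.
new = values_real[start_idx + 1 : start_idx + encoder_length]

np.testing.assert_equal(old, new)  # identical contents
assert new.base is values_real     # but no copy was made
```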
etna/models/nn/deepstate/deepstate.py (26 changes: 12 additions & 14 deletions)
@@ -4,6 +4,7 @@
 from typing import Optional
 from typing import Tuple

+import numpy as np
 import pandas as pd
 from typing_extensions import TypedDict
@@ -117,9 +118,9 @@ def step(self, batch: DeepStateBatch, *args, **kwargs):  # type: ignore
 :
     loss, true_target, prediction_target
 """
-encoder_real = batch["encoder_real"]  # (batch_size, seq_length, input_size)
+encoder_real = batch["encoder_real"].float()  # (batch_size, seq_length, input_size)
 encoder_categorical = batch["encoder_categorical"]  # each (batch_size, seq_length, 1)
-targets = batch["encoder_target"]  # (batch_size, seq_length, 1)
+targets = batch["encoder_target"].float()  # (batch_size, seq_length, 1)
 seq_length = targets.shape[1]
 datetime_index = batch["datetime_index"].permute(1, 0, 2)[
     :, :, :seq_length
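One detail about the `.float()` casts introduced here: `Tensor.float()` is shorthand for `.to(torch.float32)`, which returns the tensor itself when it already has that dtype, so the cast only allocates when the collated batch arrives in another precision. A quick check:

```python
import torch

t32 = torch.zeros(3, dtype=torch.float32)
t64 = torch.zeros(3, dtype=torch.float64)

assert t32.float() is t32                  # same dtype: no-op, no copy
assert t64.float().dtype == torch.float32  # different dtype: converted copy
```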
@@ -159,11 +160,11 @@ def forward(self, x: DeepStateBatch, *args, **kwargs):  # type: ignore
 :
     forecast with shape (batch_size, decoder_length, 1)
 """
-encoder_real = x["encoder_real"]  # (batch_size, seq_length, input_size)
+encoder_real = x["encoder_real"].float()  # (batch_size, seq_length, input_size)
 encoder_categorical = x["encoder_categorical"]  # each (batch_size, seq_length, 1)
 seq_length = encoder_real.shape[1]
-targets = x["encoder_target"][:, :seq_length]  # (batch_size, seq_length, 1)
-decoder_real = x["decoder_real"]  # (batch_size, horizon, input_size)
+targets = x["encoder_target"][:, :seq_length].float()  # (batch_size, seq_length, 1)
+decoder_real = x["decoder_real"].float()  # (batch_size, horizon, input_size)
 decoder_categorical = x["decoder_categorical"]  # each (batch_size, horizon, 1)
 datetime_index_train = x["datetime_index"].permute(1, 0, 2)[
     :, :, :seq_length
@@ -213,26 +214,23 @@ def forward(self, x: DeepStateBatch, *args, **kwargs):  # type: ignore
 def make_samples(self, df: pd.DataFrame, encoder_length: int, decoder_length: int) -> Iterator[dict]:
     """Make samples from segment DataFrame."""
     values_real = df.drop(columns=["target", "segment", "timestamp"] + list(self.embedding_sizes.keys())).values
-    values_real = torch.from_numpy(values_real).float()

     # Categories that were not seen during `fit` will be filled with new category
     for feature in self.embedding_sizes:
         df[feature] = df[feature].astype(float).fillna(self.embedding_sizes[feature][0])

     # Columns in `values_categorical` are in the same order as in `embedding_sizes`
-    values_categorical = torch.from_numpy(df[self.embedding_sizes.keys()].values.T)
+    values_categorical = df[self.embedding_sizes.keys()].values.T

-    values_datetime = torch.from_numpy(self.ssm.generate_datetime_index(df["timestamp"]))
-    values_datetime = values_datetime.to(torch.int64)
+    values_datetime = self.ssm.generate_datetime_index(df["timestamp"]).astype(int)
     values_target = df["target"].values
-    values_target = torch.from_numpy(values_target).float()
     segment = df["segment"].values[0]

     def _make(
-        values_target: torch.Tensor,
-        values_real: torch.Tensor,
-        values_categorical: torch.Tensor,
-        values_datetime: torch.Tensor,
+        values_target: np.ndarray,
+        values_real: np.ndarray,
+        values_categorical: np.ndarray,
+        values_datetime: np.ndarray,
         segment: str,
         start_idx: int,
         encoder_length: int,
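On the DeepState side, the `torch.from_numpy(...)` / `.to(torch.int64)` / `.float()` conversions move out of `make_samples` and into `step`/`forward`: samples stay plain numpy views, and the cast happens once per collated batch instead of once per sample. A sketch of the deferred conversion, assuming the default DataLoader collation:

```python
import numpy as np
import torch

values_target = np.arange(8, dtype=np.float64)  # toy per-segment target
sample_target = values_target[1:6]              # make_samples now yields a view

batch_target = torch.from_numpy(sample_target)  # still shares the numpy buffer
encoder_target = batch_target.float()           # float64 -> float32, allocated per batch
```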
tests/test_models/test_nn/deepar_native/test_deepar_native.py (11 changes: 8 additions & 3 deletions)
@@ -105,12 +105,11 @@ def test_deepar_make_samples(df_name, scale, weights, cat_columns, request):
 num_samples_check = 2
 df["target_shifted"] = df["target"].shift(1)
 for i in range(num_samples_check):
-    df[f"target_shifted_scaled_{i}"] = df["target_shifted"] / weights[i]
     expected_sample = {
-        "encoder_real": df[[f"target_shifted_scaled_{i}", "regressor_float", "regressor_int"]]
+        "encoder_real": df[["target_shifted", "regressor_float", "regressor_int"]]
         .iloc[1 + i : encoder_length + i]
         .values,
-        "decoder_real": df[[f"target_shifted_scaled_{i}", "regressor_float", "regressor_int"]]
+        "decoder_real": df[["target_shifted", "regressor_float", "regressor_int"]]
         .iloc[encoder_length + i : encoder_length + decoder_length + i]
         .values,
         "encoder_categorical": {
@@ -138,6 +137,12 @@
 assert ts_samples[i]["segment"] == "segment_1"
 for key in expected_sample:
     np.testing.assert_equal(ts_samples[i][key], expected_sample[key])
+    if "categorical" in key:
+        for column in ts_samples[i][key]:
+            assert ts_samples[i][key][column].base is not None
+    else:
+        if key != "weight":
+            assert ts_samples[i][key].base is not None


 @pytest.mark.parametrize("encoder_length", [1, 2, 10])
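The added assertions lean on numpy's `ndarray.base`: an array that borrows memory from another array keeps a reference to it in `.base`, while an array that owns its buffer has `.base is None`, so `base is not None` is a cheap way to verify that `make_samples` made no copy (the `weight` key is skipped, presumably because it is a scalar rather than an array). Illustration:

```python
import numpy as np

parent = np.arange(10)
view = parent[2:7]         # basic slicing borrows parent's buffer
copy = parent[2:7].copy()  # .copy() allocates a new one

assert view.base is not None  # would pass the new check
assert copy.base is None      # would fail it
```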
tests/test_models/test_nn/test_deepstate.py (5 changes: 5 additions & 0 deletions)
@@ -139,6 +139,11 @@ def test_deepstate_make_samples(df_name, cat_columns, request):
 assert ts_samples[i]["segment"] == "segment_1"
 for key in expected_sample:
     np.testing.assert_equal(ts_samples[i][key], expected_sample[key])
+    if "categorical" in key:
+        for column in ts_samples[i][key]:
+            assert ts_samples[i][key][column].base is not None
+    else:
+        assert ts_samples[i][key].base is not None


def test_save_load(example_tsds):
