Skip to content

Commit

Permalink
renamed dataloader , rescaled target, removed target scale, target ce…
Browse files Browse the repository at this point in the history
…nter
  • Loading branch information
youssefmecky96 committed Nov 20, 2023
1 parent f435717 commit ce078a1
Show file tree
Hide file tree
Showing 8 changed files with 55 additions and 46 deletions.
21 changes: 9 additions & 12 deletions configs/prediction_models/DeepARpytorch.gin

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion configs/prediction_models/TFTpytorch.gin
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ model/hyperparameter.lr_scheduler = "exponential"

# Dataset params
PredictionDatasetTFTpytorch.max_encoder_length = 24
PredictionDatasetTFTpytorch.max_prediction_length = 24
PredictionDatasetTFTpytorch.max_prediction_length = 1
PredictionDatasetTFTpytorch.target="label"
PredictionDatasetTFTpytorch.time_varying_known_reals=["time_idx"]
PredictionDatasetTFTpytorch.add_relative_time_idx=False
Expand Down
2 changes: 1 addition & 1 deletion configs/prediction_models/common/DLCommon.gin
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ base_regression_preprocessor.generate_features = False
# Train params
train_common.optimizer = @Adam
train_common.epochs = 1000
train_common.batch_size = 1
train_common.batch_size = 64
train_common.patience = 20
train_common.min_delta = 1e-4

Expand Down
2 changes: 1 addition & 1 deletion configs/prediction_models/common/DLTuning.gin
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
# Hyperparameter tuner settings for Deep Learning.
tune_hyperparameters.scopes = ["model", "optimizer"]
tune_hyperparameters.n_initial_points = 5
tune_hyperparameters.n_calls = 30
tune_hyperparameters.n_calls = 15
tune_hyperparameters.folds_to_tune_on = 2
2 changes: 1 addition & 1 deletion configs/tasks/Regression.gin
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ preprocess.use_static = True

# SPECIFYING REGRESSION OUTCOME SCALING
base_regression_preprocessor.outcome_min = 0
base_regression_preprocessor.outcome_max = 15
base_regression_preprocessor.outcome_max = 168

# SELECTING DATASET
PredictionDataset.vars = %vars
Expand Down
8 changes: 4 additions & 4 deletions icu_benchmarks/data/loader.py
Original file line number Diff line number Diff line change
Expand Up @@ -429,8 +429,8 @@ def __getitem__(self, idx: int) -> Tuple[Tensor, Tensor, Tensor]:
return from_numpy(window.values).to(float32)


@gin.configurable("PredictionDatasetTFTpytorch")
class PredictionDatasetTFTpytorch(TimeSeriesDataSet):
@gin.configurable("PredictionDatasetpytorch")
class PredictionDatasetpytorch(TimeSeriesDataSet):
"""Subclass of timeseries dataset that works with the pytorch forecasting library.
Args:
Expand Down Expand Up @@ -498,8 +498,8 @@ def __init__(
time_varying_unknown_categoricals=time_varying_unknown_categoricals,
time_varying_unknown_reals=time_varying_unknown_reals,
add_relative_time_idx=add_relative_time_idx,
add_target_scales=True,
add_encoder_length=True,
# add_target_scales=True,
# add_encoder_length=True,
predict_mode=True,
target_normalizer=None,
)
Expand Down
46 changes: 25 additions & 21 deletions icu_benchmarks/models/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@
from icu_benchmarks.data.loader import (
PredictionDataset,
ImputationDataset,
PredictionDatasetTFTpytorch,
PredictionDatasetpytorch,
)
from icu_benchmarks.models.utils import save_config_file, JSONMetricsLogger
from icu_benchmarks.contants import RunMode
Expand Down Expand Up @@ -111,7 +111,7 @@ def train_common(
ImputationDataset
if mode == RunMode.imputation
else (
PredictionDatasetTFTpytorch if (pytorch_forecasting) else PredictionDataset
PredictionDatasetpytorch if (pytorch_forecasting) else PredictionDataset
)
)

Expand Down Expand Up @@ -277,31 +277,37 @@ def train_common(

if explain:

attributions_Saliency = model.explantation_captum(
test_loader=test_loader,
method=Saliency, log_dir=log_dir, plot=True
)
print("saliency", attributions_Saliency)
# attributions_Saliency = model.explantation_captum(
# test_loader=test_loader,
# method=Saliency, log_dir=log_dir, plot=True
# )

# print("saliency", attributions_Saliency)
"""
attributions_shap = model.explantation_captum(
test_loader=test_loader,
method=ShapleyValueSampling, log_dir=log_dir, plot=True, n_samples=10
)
print("shap", attributions_shap)
attributions_IG = model.explantation_captum(
test_loader=test_loader,
method=IntegratedGradients, log_dir=log_dir, plot=True, n_steps=20
)
print("IG", attributions_IG)
"""
# attributions_IG = model.explantation_captum(
# test_loader=test_loader,
# method=IntegratedGradients, log_dir=log_dir, plot=True, n_steps=20
# )
# print("IG", attributions_IG)
Interpertations = model.interpertations(test_loader, log_dir, plot=True)
print("attention", Interpertations)
attributions_dict = {
"attributions_Saliency": attributions_Saliency.tolist(),
"attributions_IG": attributions_IG.tolist(),
"attributions_shap": attributions_shap.tolist(),
# "attributions_shap": attributions_shap.tolist(),
"attention_weights": Interpertations["attention"].tolist(),
"static_variables": Interpertations["static_variables"].tolist(),
"encoder_variables": Interpertations["encoder_variables"].tolist()
}
variable_importance = np.concatenate((Interpertations["static_variables"].cpu().detach().numpy(),
Interpertations["encoder_variables"].cpu().detach().numpy()))
ind = np.argpartition(variable_importance, -5)[-5:]

# Path to the JSON file in log_dir
json_file_path = f"{log_dir}/Attributions.json"
Expand All @@ -310,26 +316,24 @@ def train_common(
with open(json_file_path, 'w') as json_file:
json.dump(attributions_dict, json_file)
if XAI_metric:

random_attributions = np.random.normal(size=24)

F_baseline = model.Faithfulness_Correlation(test_loader, random_attributions, pertrub='Noise')
F_baseline = model.Faithfulness_Correlation(test_loader, random_attributions, pertrub='Noise', ind=ind)
print('Random normal faithfulness correlation', F_baseline)

F_attribution = model.Faithfulness_Correlation(test_loader, attributions_IG, pertrub='Noise')
print('Attributions faithfulness correlation', F_attribution)
F_attention = model.Faithfulness_Correlation(test_loader, Interpertations["attention"], pertrub='Noise')
F_attention = model.Faithfulness_Correlation(
test_loader, Interpertations["attention"], pertrub='Noise', ind=ind)
print('Attention faithfulness correlation', F_attention)
F_saliency = model.Faithfulness_Correlation(test_loader, attributions_Saliency, pertrub='Noise')
print('Saliency faithfulness correlation', F_saliency)
F_shap = model.Faithfulness_Correlation(test_loader, attributions_shap, pertrub='Noise')
print('shap faithfulness correlation', F_shap)
# F_shap = model.Faithfulness_Correlation(test_loader, attributions_shap, pertrub='Noise')
# print('shap faithfulness correlation', F_shap)
XAI_dict = {
"Faithfulness_correlation_normal_random": F_baseline,
"Faithfulness_correlation_IG": F_attribution,
"Faithfulness_correlation_attention": F_attention,
"Faithfulness_correlation_saliency": F_saliency,
"Faithfulness_correlation_shap": F_shap
# "Faithfulness_correlation_shap": F_shap
}

# Path to the JSON file in log_dir
Expand Down
18 changes: 13 additions & 5 deletions icu_benchmarks/models/wrappers.py
Original file line number Diff line number Diff line change
Expand Up @@ -529,7 +529,7 @@ def explantation_captum(self, test_loader, method, log_dir=".", plot=False, **kw
explantation = method(self.forward_captum)
# Reformat attributions.
attr_all_timesteps = []
for time_step in range(23, 24):
for time_step in range(0, 24):
if method is not captum.attr.Saliency:
attr = explantation.attribute(
data, target=(time_step), baselines=baselines, **kwargs
Expand Down Expand Up @@ -593,7 +593,7 @@ def explantation_captum(self, test_loader, method, log_dir=".", plot=False, **kw
return means

def Faithfulness_Correlation(
self, test_loader, attribution, similarity_func=None, nr_runs=2, pertrub=None, subset_size=6, features=False
self, test_loader, attribution, similarity_func=None, nr_runs=2, pertrub=None, subset_size=3, ind=[]
):
"""
Implementation of faithfulness correlation by Bhatt et al., 2020.
Expand Down Expand Up @@ -654,16 +654,24 @@ def Faithfulness_Correlation(

if pertrub == "Noise":
# add normal noise to input

noise = torch.randn_like(x["encoder_cont"])
if (len(ind) == 0):
x["encoder_cont"][:, a_ix, :] += noise[:, a_ix, :]
else:

x["encoder_cont"][:, a_ix[:, None], ind] += noise[:, a_ix[:, None], ind]

x["encoder_cont"][:, a_ix, :] += noise[:, a_ix, :]
elif pertrub == "baseline":
# Create a mask tensor with zeros at specified time steps and ones everywhere else
# pytorch bug need to change to cpu for next step and then revert
mask = torch.ones_like(x["encoder_cont"]).cpu()

mask[:, a_ix, :] = 0
if (len(ind) == 0):
mask[:, a_ix, :] = 0
else:
mask[:, a_ix, ind] = 0
mask = mask.to(x["encoder_cont"].device)

x["encoder_cont"] = x["encoder_cont"] * mask
data = (
x["encoder_cat"],
Expand Down

0 comments on commit ce078a1

Please sign in to comment.