Commit f69f725
minor updates:
- add annotations to paper demo
- add script to convert resnet best results to latex table
- add conv3d to converter
- formatting to make ruff happy
plutonium-239 committed Jul 6, 2024
1 parent 962ac7e commit f69f725
Showing 6 changed files with 66 additions and 16 deletions.
17 changes: 9 additions & 8 deletions experiments/paper_demo.py
@@ -44,13 +44,14 @@
 # "resnext101_64x4d",
 # ]
 # models = prefix_in_pairs("memsave_", models)
+# # models = ["resnet101", "memsave_resnet101_conv", "memsave_resnet101_conv+relu+bn", "memsave_resnet101_conv_full"]
 # batch_size = 64
 # input_channels = 3
 # input_HW = 224
 # num_classes = 1000
 # device = "cuda"
 # architecture = "conv"
-# cases = collect_results.select_cases(['All', 'Input', 'Conv', 'Norm', 'SurgicalLast'])
+# cases = collect_results.select_cases(['All', 'Input', 'Conv', 'Norm', 'SurgicalFirst', 'SurgicalLast'])
 
 # ============== TRANSFORMER CONFIG ==============
 # Valid choices for models are in models.transformer_model_fns
@@ -78,15 +79,15 @@
 
 # ============== LINEAR CONFIG ==============
 # Valid choices for models are in models.linear_model_fns
-# models = ['deeplinearmodel']
+# models = ["deeplinearmodel"]
 # models += [f"memsave_{m}" for m in models] # add memsave versions for each model
-# batch_size = 32768
-# input_channels = 3
-# input_HW = 64
+# batch_size = 1024
+# input_channels = 3 # ignored
+# input_HW = 64 # square of this is passed in estimate.py
 # num_classes = 1000
-# device = 'cuda'
-# architecture = 'linear' # use high batch size
-# cases = collect_results.select_cases(['All', 'Input', 'Linear'])
+# device = "cuda"
+# architecture = "linear" # use high batch size
+# cases = collect_results.select_cases(["All", "Input", "Linear"])
 
 
 if __name__ == "__main__":
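Side note on the conv config above: `prefix_in_pairs` is a helper from this repo. The sketch below is a hypothetical stand-in illustrating only the assumed behavior (interleaving each model with its "memsave_"-prefixed variant), not the actual implementation.

# Hypothetical sketch, not the repo's implementation: assumed behavior of
# prefix_in_pairs as used in the conv config above.
def prefix_in_pairs(prefix: str, models: list) -> list:
    # ["resnet101", "resnet18"] -> ["resnet101", "memsave_resnet101",
    #                               "resnet18", "memsave_resnet18"]
    return [variant for m in models for variant in (m, prefix + m)]

models = prefix_in_pairs("memsave_", ["resnet101", "resnet18"])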
39 changes: 39 additions & 0 deletions experiments/resnet_best_results_to_latex.py
@@ -0,0 +1,39 @@
"""Simple script to make a latex table from resnet results"""

import pandas as pd

df = pd.read_csv("results/resnet101_only/best_results-conv-cpu-usage_stats.csv")
df = df.set_index("model")
df = df.drop(columns=["Scaled M", "Scaled T"])
df = df.drop("memsave_resnet101_conv+relu+bn")
df = df[df["case"] != "SurgicalLast"]
df = df[df["case"] != "Conv"]

mem_div = df[df["case"] == "All"].loc["resnet101", "Memory Usage (GB)"]
time_div = df[df["case"] == "All"].loc["resnet101", "Time Taken (s)"]
df["Scaled M"] = df["Memory Usage (GB)"] / mem_div
df["Scaled T"] = df["Time Taken (s)"] / time_div

df["Memory [GiB]"] = df.apply(
lambda x: f"{x['Memory Usage (GB)']:.2f} ({x['Scaled M']:.2f})", axis=1
)
df["Time [s]"] = df.apply(
lambda x: f"{x['Time Taken (s)']:.2f} ({x['Scaled T']:.2f})", axis=1
)

df = df.drop(columns=["Scaled M", "Scaled T", "Memory Usage (GB)", "Time Taken (s)"])
df_p = df.pivot_table(
index="model", columns="case", values=df.columns[1:], aggfunc=lambda x: x
)

labels = {
"resnet101": "Default ResNet-101",
"memsave_resnet101_conv": "+ swap Convolution",
"memsave_resnet101_conv_full": "+ swap BatchNorm, ReLU",
}

df_p = df_p.rename(index=labels)
df_p = df_p.sort_index(ascending=False)

print(df_p["Memory [GiB]"].to_latex())
print(df_p["Time [s]"].to_latex())
7 changes: 4 additions & 3 deletions experiments/visual_abstract/run.py
@@ -6,16 +6,17 @@
 from os import makedirs, path
 
 from memory_profiler import memory_usage
+from torch import allclose, manual_seed, rand, rand_like
+from torch.autograd import grad
+from torch.nn import BatchNorm2d, Conv1d, Conv2d, Conv3d, Linear, Sequential
 
 from memsave_torch.nn import (
     MemSaveBatchNorm2d,
     MemSaveConv1d,
     MemSaveConv2d,
+    MemSaveConv3d,
     MemSaveLinear,
 )
-from torch import allclose, manual_seed, rand, rand_like
-from torch.autograd import grad
-from torch.nn import BatchNorm2d, Conv1d, Conv2d, Conv3d, Linear, Sequential
 
 HEREDIR = path.dirname(path.abspath(__file__))
 DATADIR = path.join(HEREDIR, "raw")
Binary file removed experiments/visual_abstract/visual_abstract.pdf
1 change: 1 addition & 0 deletions memsave_torch/nn/Conv3d.py
@@ -5,6 +5,7 @@
 
 import torch
 import torch.nn as nn
+
 from memsave_torch.nn.functional import conv3dMemSave
 
 
18 changes: 13 additions & 5 deletions memsave_torch/nn/__init__.py
@@ -9,6 +9,7 @@
 import sys
 
 import torch.nn as nn
+
 from memsave_torch.nn import functional  # noqa: F401
 from memsave_torch.nn.BatchNorm import MemSaveBatchNorm2d
 from memsave_torch.nn.Conv1d import MemSaveConv1d
@@ -34,8 +35,9 @@
 def convert_to_memory_saving(
     model: nn.Module,
     linear=True,
-    conv1d=True,
     conv2d=True,
+    conv1d=False,
+    conv3d=True,
     batchnorm2d=True,
     relu=True,
     maxpool2d=True,
@@ -53,8 +55,9 @@ def convert_to_memory_saving(
     Args:
         model (nn.Module): The input model
         linear (bool, optional): Whether to replace `nn.Linear` layers
-        conv2d (bool, optional): Whether to replace `nn.Conv2d` layers
         conv1d (bool, optional): Whether to replace `nn.Conv1d` layers
+        conv2d (bool, optional): Whether to replace `nn.Conv2d` layers
+        conv3d (bool, optional): Whether to replace `nn.Conv3d` layers
         batchnorm2d (bool, optional): Whether to replace `nn.BatchNorm2d` layers
         relu (bool, optional): Whether to replace `nn.ReLU` layers
         maxpool2d (bool, optional): Whether to replace `nn.MaxPool2d` layers
@@ -89,15 +92,20 @@
             "cls": nn.MaxPool2d,
             "convert_fn": MemSaveMaxPool2d.from_nn_MaxPool2d,
         },
+        {
+            "allowed": conv1d,
+            "cls": nn.Conv1d,
+            "convert_fn": MemSaveConv1d.from_nn_Conv1d,
+        },
         {
             "allowed": conv2d,
             "cls": nn.Conv2d,
             "convert_fn": MemSaveConv2d.from_nn_Conv2d,
         },
         {
-            "allowed": conv1d,
-            "cls": nn.Conv1d,
-            "convert_fn": MemSaveConv1d.from_nn_Conv1d,
+            "allowed": conv3d,
+            "cls": nn.Conv3d,
+            "convert_fn": MemSaveConv3d.from_nn_Conv3d,
         },
         {
             "allowed": batchnorm2d,
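With the new conv3d flag wired into the converter, converting a small 3d model might look like the sketch below. This is a minimal usage sketch under two assumptions: that convert_to_memory_saving returns the transformed module, and that the flag defaults match the diff above (conv3d=True, conv1d=False).

# Minimal usage sketch (assumption: convert_to_memory_saving returns the
# converted module; flag defaults taken from the diff above).
import torch
from torch import nn
from memsave_torch.nn import convert_to_memory_saving

net = nn.Sequential(nn.Conv3d(3, 8, kernel_size=3), nn.ReLU())
# Replaces supported layers (here nn.Conv3d and nn.ReLU) with MemSave equivalents.
memsave_net = convert_to_memory_saving(net)

x = torch.rand(2, 3, 8, 16, 16)  # (batch, channels, depth, height, width)
print(memsave_net(x).shape)  # expected: torch.Size([2, 8, 6, 14, 14])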
