add results dir as option, add github action for ruff checks

1 parent dd15c20 · commit 8eb3cf0
Showing 7 changed files with 157 additions and 76 deletions.
New GitHub Actions workflow (`@@ -0,0 +1,14 @@`):

```yaml
name: Ruff
on: [push, pull_request]
jobs:
  ruff:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: ruff
        env:
          RUFF_OUTPUT_FORMAT: github
        run: |
          pip install ruff
          ruff format --check
          ruff check
```
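With `RUFF_OUTPUT_FORMAT: github`, ruff emits GitHub-style workflow annotations, so lint and formatting violations appear inline on the offending lines in the pull request view. `ruff format --check` verifies formatting without rewriting any files, and a non-zero exit from either command fails the job.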
`memsave_torch/__init__.py` (`@@ -1,3 +1,4 @@`):

```python
"""memsave_torch package"""

import memsave_torch.nn as nn  # noqa: F401
import memsave_torch.util as util  # noqa: F401
```
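The `# noqa: F401` markers suppress ruff's "imported but unused" warning (rule F401): these imports exist only to re-export `memsave_torch.nn` and `memsave_torch.util` as package attributes, which the new CI check would otherwise flag.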
The best-results script: the module-level loop moves into a `main` function, and the hardcoded `results/` directory becomes a CLI option.

```diff
@@ -1,40 +1,66 @@
 """Simple script that goes over the raw results and finds the best results."""
 
+import argparse
+import os
 from glob import glob
 from itertools import product
 
 import pandas as pd
 
 from memsave_torch.util.collect_results import case_mapping
 
-for device, arch in product(["cuda", "cpu"], ["linear", "conv"]):
-    # usage stats
-    df = None
-    idx_col = ["model", "case"]
-    for fname in glob(f"results/usage_stats-{arch}-{device}-*.csv"):
-        with open(fname) as f:
-            f.readline()
-            temp_df = pd.read_csv(f, index_col=idx_col)
-        df = temp_df if df is None else pd.concat([df, temp_df])
-    if df is not None:
-        df = df.rename(index=case_mapping, level=1)
-        df["Memory Usage (GB)"] = df["Memory Usage (MB)"] / 1024
-        df = df.drop(columns=["Memory Usage (MB)"])
-        best_results = df.groupby(idx_col).min()
-        # scale
-        maxes = best_results.groupby(["model"]).max()
-        best_results[["Scaled T", "Scaled M"]] = best_results / maxes
-        best_results.to_csv(f"results/best_results-{arch}-{device}-usage_stats.csv")
-
-    # savings
-    df = None
-    idx_col = ["model", "input_vjps"]
-    for fname in glob(f"results/savings-{arch}-{device}*.csv"):
-        with open(fname) as f:
-            f.readline()
-            temp_df = pd.read_csv(f, index_col=idx_col)
-        df = temp_df if df is None else pd.concat([df, temp_df])
-
-    if df is not None:
-        best_results = df.groupby(idx_col).max()
-        best_results.to_csv(f"results/best_results-{arch}-{device}-savings.csv")
+def main(base_dir: str):
+    """Gets the best results from all previous runs
+
+    Args:
+        base_dir (str): The base results dir
+    """
+    for device, arch in product(["cuda", "cpu"], ["linear", "conv"]):
+        # usage stats
+        df = None
+        idx_col = ["model", "case"]
+        for fname in glob(os.path.join(base_dir, f"usage_stats-{arch}-{device}-*.csv")):
+            with open(fname) as f:
+                f.readline()
+                temp_df = pd.read_csv(f, index_col=idx_col)
+            df = temp_df if df is None else pd.concat([df, temp_df])
+        if df is not None:
+            df = df.rename(index=case_mapping, level=1)
+            df["Memory Usage (GB)"] = df["Memory Usage (MB)"] / 1024
+            df = df.drop(columns=["Memory Usage (MB)"])
+            best_results = df.groupby(idx_col).min()
+            # scale
+            maxes = best_results.groupby(["model"]).max()
+            best_results[["Scaled T", "Scaled M"]] = best_results / maxes
+            best_results.to_csv(
+                os.path.join(base_dir, f"best_results-{arch}-{device}-usage_stats.csv")
+            )
+
+        # savings
+        df = None
+        idx_col = ["model", "input_vjps"]
+        for fname in glob(os.path.join(base_dir, f"savings-{arch}-{device}*.csv")):
+            with open(fname) as f:
+                f.readline()
+                temp_df = pd.read_csv(f, index_col=idx_col)
+            df = temp_df if df is None else pd.concat([df, temp_df])
+
+        if df is not None:
+            best_results = df.groupby(idx_col).max()
+            best_results.to_csv(
+                os.path.join(base_dir, f"best_results-{arch}-{device}-savings.csv")
+            )
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--results_dir", type=str, default="results/", help="The base results dir"
+    )
+    args = parser.parse_args()
+
+    base_dir = args.results_dir
+    assert os.path.exists(base_dir)  # fail fast if the results dir is missing
+
+    main(base_dir)
```
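Both collection loops skip one metadata line with `f.readline()` before handing the open file to pandas, then fold all matching CSVs into a single indexed frame. Here is a minimal sketch of that pattern, assuming (hypothetically) that each results CSV starts with one metadata line followed by the real header:

```python
from glob import glob

import pandas as pd

idx_col = ["model", "case"]
frames = []
for fname in glob("results/usage_stats-conv-cpu-*.csv"):  # hypothetical files
    # skiprows=1 drops the leading metadata line, like f.readline() above
    frames.append(pd.read_csv(fname, skiprows=1, index_col=idx_col))

if frames:  # mirrors the script's `if df is not None` guard
    df = pd.concat(frames)
    # lowest value per (model, case): the "best" time and memory
    print(df.groupby(idx_col).min())
```

Since `--results_dir` defaults to `results/`, existing invocations keep their old behavior, while runs that write elsewhere can now point the script at their own directory.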
(The diffs for the remaining changed files are not shown.)