eval_json.py
"""Evaluate predictions with the meta-tuning evaluator.

Pass --json_file to score a loose predictions JSON directly, or --run_name to
score ./output/<run_name>/predictions_predict.json and write summary.json
next to it.
"""
import argparse
import importlib
import json
import os

from utils.configure import Configure

def eval_loose_json(args):
    # Score a standalone predictions file; an empty config path loads the
    # default configuration.
    cfgargs = Configure.Get('')
    evaluator = importlib.import_module("metrics.meta_tuning.evaluator").EvaluateTool(cfgargs)
    with open(args.json_file, "r") as f:
        data = json.load(f)
    # Each item carries its own gold fields, so the raw items double as labels.
    preds = [item['prediction'] for item in data]
    labs = data
    summary = evaluator.evaluate(preds, labs, "test")
    print(summary)
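
# Both entry points expect the predictions JSON to be a list of dicts, each
# holding the model output under 'prediction' alongside whatever gold fields
# EvaluateTool reads (the raw items are passed through as labels). A sketch of
# one plausible entry; the gold key names are assumptions, not fixed here:
#
#   [
#       {"prediction": "42", ...gold fields consumed by EvaluateTool...},
#       ...
#   ]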

def main(args):
    # Score a named run: predictions live under ./output/<run_name> and the
    # config is <run_name>.cfg. importlib pulls EvaluateTool from
    # metrics.meta_tuning.evaluator.
    output_path = f"./output/{args.run_name}"
    predictions_path = os.path.join(output_path, "predictions_predict.json")
    config_path = f"{args.run_name}.cfg"
    cfgargs = Configure.Get(config_path)  # renamed from `args` to avoid shadowing the CLI args
    evaluator = importlib.import_module("metrics.meta_tuning.evaluator").EvaluateTool(cfgargs)
    with open(predictions_path, "r") as f:
        data = json.load(f)
    preds = [item['prediction'] for item in data]
    labs = data
    summary = evaluator.evaluate(preds, labs, "test")
    print(summary)
    with open(os.path.join(output_path, "summary.json"), "w") as f:
        json.dump(summary, f, indent=4)

if __name__ == "__main__":
    # Args: --json_file to score a loose predictions file, otherwise
    # --run_name to score a finished run.
    parser = argparse.ArgumentParser()
    parser.add_argument("--run_name", type=str, default="run1")
    parser.add_argument("--json_file", type=str, default=None)
    args = parser.parse_args()
    if args.json_file:
        eval_loose_json(args)
    else:
        main(args)
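
# Usage sketch (the paths and config layout are read off the code above, so
# treat them as assumptions rather than documented behaviour):
#
#   # Score a finished run; expects ./output/run1/predictions_predict.json
#   # plus a run1.cfg that utils.configure.Configure can resolve:
#   python eval_json.py --run_name run1
#
#   # Score an arbitrary predictions file directly:
#   python eval_json.py --json_file path/to/predictions.json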