tidy_hm_data.py
#!/usr/bin/env python3
"""
Tidy up history matching, create single CSV file with all wave relevant information merged into one place
Consistent with EKI file format, allows direct comparison between methods
"""
if __name__ == "__main__":
    import argparse
    import os
    import re
    import sys
    from glob import glob

    import numpy as np
    import pandas as pd
    parser = argparse.ArgumentParser(
        description=(
            "Tidy up a history matching experiment: merge all wave-relevant information "
            "into one CSV file per wave, consistent with the EKI file format for direct "
            "comparison between methods."
        )
    )
    parser.add_argument(
        "input_dir", help="Input directory of the history matching experiment"
    )
    args = parser.parse_args()
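    # Per-wave sample files are named <wave>_samples.csv; sort them by wave index
    # so that wave i pairs with the wave_<i>/analysis directory below.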
    base_files = sorted(
        glob(os.path.join(args.input_dir, "*_samples.csv")),
        key=lambda x: int(re.search(r"\d+(?=_samples\.csv)", x).group()),
    )
## TODO: make i f
for i, base in enumerate(base_files):
df = pd.read_csv(base, index_col="run_id")
analysis_directory = os.path.join(args.input_dir, f"wave_{i}", "analysis")
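        # y.npy holds the model outputs (period, amplitude) for every run in this
        # wave; y_err.npy holds the corresponding standard errors.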
try:
output = np.load(os.path.join(analysis_directory, "y.npy"))
output_err = np.load(os.path.join(analysis_directory, "y_err.npy"))
except FileNotFoundError:
print(f"[WARN] No output data for {i} - skipping", file=sys.stderr)
continue
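        # Attach the outputs (period, amplitude) and their standard errors to the
        # sampled parameters so each run is described by a single row.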
df[["period", "amplitude"]] = output
df[["period_sem", "amplitude_sem"]] = output_err
df.to_csv(os.path.join(os.path.dirname(base), f"output_{i}.csv"))
print(f"[INFO] Done wave {i}")
print("[DONE] All done ")