"""
This tutorial shows how to perform iterative Bayesian calibration for a linear regression model
using GrainLearning.
"""
import os
from math import floor, log

from grainlearning import BayesianCalibration
from grainlearning.dynamic_systems import IODynamicSystem

# resolve the external model and data files relative to this script's directory
PATH = os.path.abspath(os.path.dirname(__file__))

# command used to launch the external "simulation", i.e., the linear model
executable = f'python {PATH}/linear_model.py'
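# For orientation, a minimal sketch of what linear_model.py is expected to do
# (not the shipped implementation; the argument order follows from `linear` below,
# while the model form f = a * u + b and the output file name are assumptions):
#
#   import sys
#   a, b = float(sys.argv[1]), float(sys.argv[2])
#   sim_name, description = sys.argv[3], sys.argv[4]
#   # ... evaluate f = a * u + b on the control points 'u' and write the results
#   # to a '.txt' file in sim_data/ whose name contains sim_name and description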

def run_sim(calib):
    """
    Run the external executable with each parameter sample to generate the simulation output files.
    """
    system = calib.system
    # number of digits needed to zero-pad the sample index,
    # keeping the file-naming convention consistent between iterations
    mag = floor(log(system.num_samples, 10)) + 1
    print("*** Running external software... ***\n")
    # loop over the parameter table and pass each sample to the executable
    for i, params in enumerate(system.param_data):
        description = 'Iter' + str(system.curr_iter) + '_Sample' + str(i).zfill(mag)
        linear(executable, params, system.sim_name, description)

def linear(executable, params, sim_name, description):
    """Assemble the command line for one parameter sample and execute it."""
    command = ' '.join([executable, '%.8e %.8e' % tuple(params), sim_name, description])
    print(command)
    os.system(command)
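
# A printed invocation looks like this (the path and values are illustrative):
#   python /path/to/linear_model.py 2.00000000e-01 5.00000000e+00 linear Iter0_Sample00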

calibration = BayesianCalibration.from_dict(
    {
        "num_iter": 10,        # maximum number of calibration iterations
        "callback": run_sim,   # runs the external model for all parameter samples
        "system": {
            "system_type": IODynamicSystem,
            "param_min": [0.001, 0.001],  # lower bounds of the parameters 'a' and 'b'
            "param_max": [1, 10],         # upper bounds of the parameters 'a' and 'b'
            "param_names": ['a', 'b'],
            "num_samples": 20,            # number of parameter samples per iteration
            "obs_data_file": PATH + '/linear_obs.dat',
            "obs_names": ['f'],           # observable to calibrate against
            "ctrl_name": 'u',             # name of the control (independent) variable
            "sim_name": 'linear',
            "sim_data_dir": PATH + '/sim_data/',
            "sim_data_file_ext": '.txt',
            "sigma_tol": 0.01,            # stop early once the normalized uncertainty drops below this
        },
        "inference": {
            "Bayes_filter": {"ess_target": 0.3},  # target effective sample size (as a fraction)
            "sampling": {
                "max_num_components": 1,
                "n_init": 1,
                "random_state": 0,
                "slice_sampling": False,
            }
        },
        "save_fig": 0,
    }
)
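
# Run the calibration loop: each iteration draws parameter samples, launches the
# external model via `run_sim`, and updates the posterior; it stops early once the
# normalized uncertainty falls below sigma_tol.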
calibration.run()
most_prob_params = calibration.get_most_prob_params()
print(f'Most probable parameter values: {most_prob_params}')

# check the identified parameters against the reference values a = 0.2 and b = 5.0
error_tolerance = 0.1
error = most_prob_params - [0.2, 5.0]
assert abs(error[0]) / 0.2 < error_tolerance, \
    f"Model parameters are not correct, expected 0.2 but got {most_prob_params[0]}"
assert abs(error[1]) / 5.0 < error_tolerance, \
    f"Model parameters are not correct, expected 5.0 but got {most_prob_params[1]}"