From 14f572e36189d29f9fda405be4571fe877cbc191 Mon Sep 17 00:00:00 2001
From: HydrogenSulfate <490868991@qq.com>
Date: Tue, 16 Apr 2024 08:42:44 +0000
Subject: [PATCH 01/20] remove redundant seed-fixing and logger-initialization
 code from examples, and move this behaviour into yaml

---
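Note: every config below gains the same three-line hydra.callbacks block, and
every example script loses the hand-written seed/logger preamble that block
replaces. As a rough sketch only, assuming the standard Hydra
experimental-callback API, the referenced ppsci.utils.callbacks.InitCallback
amounts to something like the following; the actual implementation in
ppsci/utils/callbacks.py is authoritative:

    from os import path as osp

    from hydra.experimental.callback import Callback
    from omegaconf import DictConfig

    from ppsci.utils import logger, misc


    class InitCallback(Callback):
        def on_job_start(self, config: DictConfig, **kwargs) -> None:
            # fix the RNG seeds once per Hydra job, as the deleted
            # per-script boilerplate used to do
            misc.set_random_seed(config.seed)
            # route log output into the job's output directory
            logger.init_logger(
                "ppsci", osp.join(config.output_dir, f"{config.mode}.log"), "info"
            )

With the callback registered in each config, Hydra seeds the RNGs and
initializes the logger before train()/evaluate() is entered, so the removed
per-script preamble is no longer needed.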
 examples/RegAE/RegAE.py                       | 13 ------
 examples/RegAE/conf/RegAE.yaml                |  3 ++
 examples/amgnet/amgnet_airfoil.py             | 22 ----------
 examples/amgnet/amgnet_cylinder.py            | 22 ----------
 examples/amgnet/conf/amgnet_airfoil.yaml      |  3 ++
 examples/amgnet/conf/amgnet_cylinder.yaml     |  3 ++
 examples/biharmonic2d/biharmonic2d.py         | 10 -----
 examples/biharmonic2d/conf/biharmonic2d.yaml  |  3 ++
 examples/bracket/bracket.py                   |  5 ---
 examples/bubble/bubble.py                     | 20 ---------
 examples/bubble/conf/bubble.yaml              |  3 ++
 examples/cfdgcn/cfdgcn.py                     | 18 --------
 examples/cfdgcn/conf/cfdgcn.yaml              |  3 ++
 .../control_arm/conf/forward_analysis.yaml    |  3 ++
 .../control_arm/conf/inverse_parameter.yaml   |  3 ++
 examples/control_arm/forward_analysis.py      | 13 ------
 examples/control_arm/inverse_parameter.py     | 24 -----------
 .../conf/cylinder2d_unsteady_Re100.yaml       |  3 ++
 .../2d_unsteady/cylinder2d_unsteady_Re100.py  | 15 -------
 .../transformer_physx/conf/enn.yaml           |  3 ++
 .../transformer_physx/conf/transformer.yaml   |  3 ++
 .../transformer_physx/train_enn.py            | 22 ----------
 .../transformer_physx/train_transformer.py    | 20 ---------
 examples/darcy/conf/darcy2d.yaml              |  3 ++
 examples/darcy/darcy2d.py                     | 12 ------
 examples/deepcfd/conf/deepcfd.yaml            |  3 ++
 examples/deepcfd/deepcfd.py                   | 21 ----------
 examples/deephpms/burgers.py                  |  9 ----
 examples/deephpms/conf/burgers.yaml           |  3 ++
 examples/deephpms/conf/korteweg_de_vries.yaml |  3 ++
 .../deephpms/conf/kuramoto_sivashinsky.yaml   |  3 ++
 examples/deephpms/conf/navier_stokes.yaml     |  3 ++
 examples/deephpms/conf/schrodinger.yaml       |  3 ++
 examples/deephpms/korteweg_de_vries.py        |  9 ----
 examples/deephpms/kuramoto_sivashinsky.py     |  9 ----
 examples/deephpms/navier_stokes.py            | 24 -----------
 examples/deephpms/schrodinger.py              | 24 -----------
 examples/epnn/conf/epnn.yaml                  |  3 ++
 examples/epnn/epnn.py                         | 13 ------
 .../conf/fourcastnet_finetune.yaml            |  3 ++
 examples/fourcastnet/conf/fourcastnet_precip.yaml | 3 ++
 .../conf/fourcastnet_pretrain.yaml            |  3 ++
 examples/fourcastnet/train_finetune.py        | 23 ----------
 examples/fourcastnet/train_precip.py          | 22 ----------
 examples/fourcastnet/train_pretrain.py        | 22 ----------
 examples/fsi/viv.py                           | 10 -----
 examples/gpinn/conf/poisson_1d.yaml           |  3 ++
 examples/gpinn/poisson_1d.py                  | 11 -----
 .../heat_exchanger/conf/heat_exchanger.yaml   |  3 ++
 examples/heat_exchanger/heat_exchanger.py     | 42 -------------------
 examples/heat_pinn/conf/heat_pinn.yaml        |  3 ++
 examples/heat_pinn/heat_pinn.py               | 12 ------
 examples/hpinns/conf/hpinns.yaml              |  3 ++
 examples/hpinns/holography.py                 |  9 ----
 examples/ide/conf/volterra_ide.yaml           |  3 ++
 examples/ide/volterra_ide.py                  | 13 ------
 examples/ldc/conf/ldc2d_steady_Re10.yaml      |  3 ++
 examples/ldc/conf/ldc2d_unsteady_Re10.yaml    |  3 ++
 examples/ldc/ldc2d_steady_Re10.py             | 12 ------
 examples/ldc/ldc2d_unsteady_Re10.py           | 12 ------
 examples/lorenz/conf/enn.yaml                 |  3 ++
 examples/lorenz/conf/transformer.yaml         |  3 ++
 examples/lorenz/train_enn.py                  | 22 ----------
 examples/lorenz/train_transformer.py          | 18 --------
 examples/nowcastnet/conf/nowcastnet.yaml      |  3 ++
 examples/nowcastnet/nowcastnet.py             |  6 ---
 examples/nsfnet/VP_NSFNet1.py                 | 20 ---------
 examples/nsfnet/VP_NSFNet2.py                 | 27 ------------
 examples/nsfnet/VP_NSFNet3.py                 | 22 ----------
 examples/operator_learning/conf/deeponet.yaml |  3 ++
 examples/operator_learning/deeponet.py        | 12 ------
 examples/phycrnet/conf/burgers_equations.yaml |  3 ++
 .../conf/fitzhugh_nagumo_RD_equation.yaml     |  3 ++
 .../conf/lambda_omega_RD_equation.yaml        |  3 ++
 examples/phycrnet/main.py                     | 12 ------
 examples/phylstm/conf/phylstm2.yaml           |  3 ++
 examples/phylstm/conf/phylstm3.yaml           |  3 ++
 examples/phylstm/phylstm2.py                  | 22 ----------
 examples/phylstm/phylstm3.py                  | 27 ------------
 examples/pipe/poiseuille_flow.py              | 10 -----
 examples/rossler/conf/enn.yaml                |  3 ++
 examples/rossler/conf/transformer.yaml        |  3 ++
 examples/rossler/train_enn.py                 | 22 ----------
 examples/rossler/train_transformer.py         | 22 ----------
 .../shock_wave/conf/shock_wave_Ma0.728.yaml   |  3 ++
 .../shock_wave/conf/shock_wave_Ma2.0.yaml     |  3 ++
 examples/shock_wave/shock_wave.py             | 13 ------
 examples/tempoGAN/conf/tempogan.yaml          |  3 ++
 examples/tempoGAN/tempoGAN.py                 | 33 ---------------
 examples/topopt/conf/topopt.yaml              |  3 ++
 examples/topopt/topopt.py                     | 10 -----
 91 files changed, 132 insertions(+), 811 deletions(-)

diff --git a/examples/RegAE/RegAE.py b/examples/RegAE/RegAE.py
index b32755bded..c662933ad5 100644
--- a/examples/RegAE/RegAE.py
+++ b/examples/RegAE/RegAE.py
@@ -14,23 +14,15 @@
 
 from __future__ import annotations
 
-from os import path as osp
-
 import hydra
 import paddle
 from omegaconf import DictConfig
 from paddle.nn import functional as F
 
 import ppsci
-from ppsci.utils import logger
 
 
 def train(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info")
-
     # set model
     model = ppsci.arch.AutoEncoder(**cfg.MODEL)
 
@@ -114,11 +106,6 @@ def loss_expr(output_dict, label_dict, weight_dict=None):
 
 
 def evaluate(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info")
-
     # set model
     model = ppsci.arch.AutoEncoder(**cfg.MODEL)
 
diff --git a/examples/RegAE/conf/RegAE.yaml b/examples/RegAE/conf/RegAE.yaml
index 15c57f82ab..433b05eea7 100644
--- a/examples/RegAE/conf/RegAE.yaml
+++ b/examples/RegAE/conf/RegAE.yaml
@@ -14,6 +14,9 @@ hydra:
         - mode
         - output_dir
         - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/amgnet/amgnet_airfoil.py b/examples/amgnet/amgnet_airfoil.py
index 24f08665ed..e1bbccccf2 100644
--- a/examples/amgnet/amgnet_airfoil.py
+++ b/examples/amgnet/amgnet_airfoil.py
@@ -14,7 +14,6 @@
 
 from __future__ import annotations
 
-from os import path as osp
 from typing import TYPE_CHECKING
 from typing import Dict
 from typing import List
@@ -53,11 +52,6 @@ def eval_rmse_func(
 
 
 def train(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info")
-
     # set airfoil model
     model = ppsci.arch.AMGNet(**cfg.MODEL)
 
@@ -76,7 +70,6 @@ def train(cfg: DictConfig):
             "drop_last": False,
             "shuffle": True,
         },
-        "num_workers": 1,
     }
 
     # set constraint
@@ -102,11 +95,6 @@ def train(cfg: DictConfig):
             "mesh_graph_path": cfg.EVAL_MESH_GRAPH_PATH,
         },
         "batch_size": cfg.EVAL.batch_size,
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
     }
     rmse_validator = ppsci.validate.SupervisedValidator(
         eval_dataloader_cfg,
@@ -152,11 +140,6 @@ def train(cfg: DictConfig):
 
 
 def evaluate(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info")
-
     # set airfoil model
     model = ppsci.arch.AMGNet(**cfg.MODEL)
 
@@ -170,11 +153,6 @@ def evaluate(cfg: DictConfig):
             "mesh_graph_path": cfg.EVAL_MESH_GRAPH_PATH,
         },
         "batch_size": cfg.EVAL.batch_size,
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
     }
     rmse_validator = ppsci.validate.SupervisedValidator(
         eval_dataloader_cfg,
diff --git a/examples/amgnet/amgnet_cylinder.py b/examples/amgnet/amgnet_cylinder.py
index 80a5ff9bd5..05c486fd96 100644
--- a/examples/amgnet/amgnet_cylinder.py
+++ b/examples/amgnet/amgnet_cylinder.py
@@ -14,7 +14,6 @@
 
 from __future__ import annotations
 
-from os import path as osp
 from typing import TYPE_CHECKING
 from typing import Dict
 from typing import List
@@ -53,11 +52,6 @@ def eval_rmse_func(
 
 
 def train(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info")
-
     # set cylinder model
     model = ppsci.arch.AMGNet(**cfg.MODEL)
 
@@ -76,7 +70,6 @@ def train(cfg: DictConfig):
             "drop_last": False,
             "shuffle": True,
         },
-        "num_workers": 1,
     }
 
     # set constraint
@@ -102,11 +95,6 @@ def train(cfg: DictConfig):
             "mesh_graph_path": cfg.EVAL_MESH_GRAPH_PATH,
         },
         "batch_size": cfg.EVAL.batch_size,
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
     }
     rmse_validator = ppsci.validate.SupervisedValidator(
         eval_dataloader_cfg,
@@ -152,11 +140,6 @@ def train(cfg: DictConfig):
 
 
 def evaluate(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info")
-
     # set airfoil model
     model = ppsci.arch.AMGNet(**cfg.MODEL)
 
@@ -170,11 +153,6 @@ def evaluate(cfg: DictConfig):
             "mesh_graph_path": cfg.EVAL_MESH_GRAPH_PATH,
         },
         "batch_size": cfg.EVAL.batch_size,
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
     }
     rmse_validator = ppsci.validate.SupervisedValidator(
         eval_dataloader_cfg,
diff --git a/examples/amgnet/conf/amgnet_airfoil.yaml b/examples/amgnet/conf/amgnet_airfoil.yaml
index b4813e33ee..dc13d528ed 100644
--- a/examples/amgnet/conf/amgnet_airfoil.yaml
+++ b/examples/amgnet/conf/amgnet_airfoil.yaml
@@ -14,6 +14,9 @@ hydra:
         - mode
         - output_dir
         - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/amgnet/conf/amgnet_cylinder.yaml b/examples/amgnet/conf/amgnet_cylinder.yaml
index 43f692a539..8bae989744 100644
--- a/examples/amgnet/conf/amgnet_cylinder.yaml
+++ b/examples/amgnet/conf/amgnet_cylinder.yaml
@@ -14,6 +14,9 @@ hydra:
         - mode
         - output_dir
         - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/biharmonic2d/biharmonic2d.py b/examples/biharmonic2d/biharmonic2d.py
index ad4f08256d..ef1b8c227a 100644
--- a/examples/biharmonic2d/biharmonic2d.py
+++ b/examples/biharmonic2d/biharmonic2d.py
@@ -69,11 +69,6 @@ def plotting(figname, output_dir, data, griddata_points, griddata_xi, boundary):
 
 
 def train(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     # set models
     disp_net = ppsci.arch.MLP(**cfg.MODEL)
 
@@ -268,11 +263,6 @@ def train(cfg: DictConfig):
 
 
 def evaluate(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     # set models
     disp_net = ppsci.arch.MLP(**cfg.MODEL)
 
diff --git a/examples/biharmonic2d/conf/biharmonic2d.yaml b/examples/biharmonic2d/conf/biharmonic2d.yaml
index 67bd20f926..6f1cabb93d 100644
--- a/examples/biharmonic2d/conf/biharmonic2d.yaml
+++ b/examples/biharmonic2d/conf/biharmonic2d.yaml
@@ -14,6 +14,9 @@ hydra:
         - mode
         - output_dir
         - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/bracket/bracket.py b/examples/bracket/bracket.py
index 381e63ce8f..d1251d23e1 100644
--- a/examples/bracket/bracket.py
+++ b/examples/bracket/bracket.py
@@ -277,11 +277,6 @@ def train(cfg: DictConfig):
             "input": input_dict,
             "label": label_dict,
         },
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
     }
     sup_validator = ppsci.validate.SupervisedValidator(
         {**eval_dataloader_cfg, "batch_size": cfg.EVAL.batch_size.sup_validator},
diff --git a/examples/bubble/bubble.py b/examples/bubble/bubble.py
index 86ce46a2ac..b0b8e6c0f2 100644
--- a/examples/bubble/bubble.py
+++ b/examples/bubble/bubble.py
@@ -32,11 +32,6 @@
 
 
 def train(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     # load Data
     data = scipy.io.loadmat(cfg.DATA_PATH)
     # normalize data
@@ -171,11 +166,6 @@ def transform_out(in_, out):
                     "label": test_label,
                 },
                 "batch_size": cfg.TRAIN.batch_size.mse_validator,
-                "sampler": {
-                    "name": "BatchSampler",
-                    "drop_last": False,
-                    "shuffle": False,
-                },
             },
             ppsci.loss.MSELoss("mean"),
             metric={"MSE": ppsci.metric.MSE()},
@@ -249,11 +239,6 @@ def transform_out(in_, out):
 
 
 def evaluate(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     # load Data
     data = scipy.io.loadmat(cfg.DATA_PATH)
     # normalize data
@@ -343,11 +328,6 @@ def transform_out(in_, out):
                     "label": test_label,
                 },
                 "batch_size": cfg.TRAIN.batch_size.mse_validator,
-                "sampler": {
-                    "name": "BatchSampler",
-                    "drop_last": False,
-                    "shuffle": False,
-                },
             },
             ppsci.loss.MSELoss("mean"),
             metric={"MSE": ppsci.metric.MSE()},
diff --git a/examples/bubble/conf/bubble.yaml b/examples/bubble/conf/bubble.yaml
index b94ab5224f..2e6212c38f 100644
--- a/examples/bubble/conf/bubble.yaml
+++ b/examples/bubble/conf/bubble.yaml
@@ -14,6 +14,9 @@ hydra:
         - mode
         - output_dir
         - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/cfdgcn/cfdgcn.py b/examples/cfdgcn/cfdgcn.py
index 779428f243..cdc46a32a4 100644
--- a/examples/cfdgcn/cfdgcn.py
+++ b/examples/cfdgcn/cfdgcn.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import os
 from typing import Dict
 from typing import List
 
@@ -25,7 +24,6 @@
 from paddle.nn import functional as F
 
 import ppsci
-from ppsci.utils import logger
 
 
 def train_mse_func(
@@ -49,11 +47,6 @@ def eval_rmse_func(
 
 
 def train(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", os.path.join(cfg.output_dir, "train.log"), "info")
-
     # set dataloader config
     train_dataloader_cfg = {
         "dataset": {
@@ -107,11 +100,6 @@ def train(cfg: DictConfig):
             "transpose_edges": True,
         },
         "batch_size": cfg.EVAL.batch_size,
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
     }
     rmse_validator = ppsci.validate.SupervisedValidator(
         eval_dataloader_cfg,
@@ -174,7 +162,6 @@ def evaluate(cfg: DictConfig):
             "drop_last": False,
             "shuffle": True,
         },
-        "num_workers": 1,
     }
 
     # set constraint
@@ -207,11 +194,6 @@ def evaluate(cfg: DictConfig):
             "transpose_edges": True,
         },
         "batch_size": cfg.EVAL.batch_size,
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
     }
     rmse_validator = ppsci.validate.SupervisedValidator(
         eval_dataloader_cfg,
diff --git a/examples/cfdgcn/conf/cfdgcn.yaml b/examples/cfdgcn/conf/cfdgcn.yaml
index a935ced58f..a7c945461f 100644
--- a/examples/cfdgcn/conf/cfdgcn.yaml
+++ b/examples/cfdgcn/conf/cfdgcn.yaml
@@ -14,6 +14,9 @@ hydra:
         - mode
         - output_dir
         - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/control_arm/conf/forward_analysis.yaml b/examples/control_arm/conf/forward_analysis.yaml
index 2ae0aeaa94..9690391509 100644
--- a/examples/control_arm/conf/forward_analysis.yaml
+++ b/examples/control_arm/conf/forward_analysis.yaml
@@ -14,6 +14,9 @@ hydra:
         - mode
         - output_dir
         - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/control_arm/conf/inverse_parameter.yaml b/examples/control_arm/conf/inverse_parameter.yaml
index d1eb16e9e6..fbe130a19b 100644
--- a/examples/control_arm/conf/inverse_parameter.yaml
+++ b/examples/control_arm/conf/inverse_parameter.yaml
@@ -14,6 +14,9 @@ hydra:
         - mode
         - output_dir
         - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/control_arm/forward_analysis.py b/examples/control_arm/forward_analysis.py
index c8fe365bef..abdfbb09bf 100644
--- a/examples/control_arm/forward_analysis.py
+++ b/examples/control_arm/forward_analysis.py
@@ -1,19 +1,12 @@
-from os import path as osp
-
 import hydra
 import numpy as np
 from omegaconf import DictConfig
 from paddle import distributed as dist
 
 import ppsci
-from ppsci.utils import logger
 
 
 def train(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
     # set parallel
     enable_parallel = dist.get_world_size() > 1
 
@@ -55,7 +48,6 @@ def train(cfg: DictConfig):
             "drop_last": True,
             "shuffle": True,
         },
-        "num_workers": 1,
     }
 
     # set constraint
@@ -212,11 +204,6 @@ def train(cfg: DictConfig):
 
 
 def evaluate(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
f"{cfg.mode}.log"), "info") - # set model disp_net = ppsci.arch.MLP(**cfg.MODEL.disp_net) stress_net = ppsci.arch.MLP(**cfg.MODEL.stress_net) diff --git a/examples/control_arm/inverse_parameter.py b/examples/control_arm/inverse_parameter.py index 9e2a43be2d..bc112a1e25 100644 --- a/examples/control_arm/inverse_parameter.py +++ b/examples/control_arm/inverse_parameter.py @@ -1,18 +1,10 @@ -from os import path as osp - import hydra from omegaconf import DictConfig import ppsci -from ppsci.utils import logger def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - # set model disp_net = ppsci.arch.MLP(**cfg.MODEL.disp_net) stress_net = ppsci.arch.MLP(**cfg.MODEL.stress_net) @@ -67,7 +59,6 @@ def train(cfg: DictConfig): "drop_last": True, "shuffle": True, }, - "num_workers": 1, "batch_size": cfg.TRAIN.batch_size.arm_interior, }, ppsci.loss.MSELoss("sum"), @@ -98,11 +89,6 @@ def train(cfg: DictConfig): geom["geo"], { "dataset": "NamedArrayDataset", - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "total_size": cfg.EVAL.total_size.validator, "batch_size": cfg.EVAL.batch_size.validator, }, @@ -169,11 +155,6 @@ def train(cfg: DictConfig): def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - # set model disp_net = ppsci.arch.MLP(**cfg.MODEL.disp_net) stress_net = ppsci.arch.MLP(**cfg.MODEL.stress_net) @@ -207,11 +188,6 @@ def evaluate(cfg: DictConfig): geom["geo"], { "dataset": "NamedArrayDataset", - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "total_size": cfg.EVAL.total_size.validator, "batch_size": cfg.EVAL.batch_size.validator, }, diff --git a/examples/cylinder/2d_unsteady/conf/cylinder2d_unsteady_Re100.yaml b/examples/cylinder/2d_unsteady/conf/cylinder2d_unsteady_Re100.yaml index dc96d3c98b..a73e240a91 100644 --- a/examples/cylinder/2d_unsteady/conf/cylinder2d_unsteady_Re100.yaml +++ b/examples/cylinder/2d_unsteady/conf/cylinder2d_unsteady_Re100.yaml @@ -14,6 +14,9 @@ hydra: - mode - output_dir - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/cylinder/2d_unsteady/cylinder2d_unsteady_Re100.py b/examples/cylinder/2d_unsteady/cylinder2d_unsteady_Re100.py index 2f9d02e4b9..45c05b4198 100644 --- a/examples/cylinder/2d_unsteady/cylinder2d_unsteady_Re100.py +++ b/examples/cylinder/2d_unsteady/cylinder2d_unsteady_Re100.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
 
-from os import path as osp
 
 import hydra
 import numpy as np
@@ -24,12 +23,6 @@
 
 
 def train(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info")
-
     # set model
     model = ppsci.arch.MLP(**cfg.MODEL)
 
@@ -180,7 +173,6 @@ def train(cfg: DictConfig):
             "dataset": "NamedArrayDataset",
             "total_size": NPOINT_EVAL,
             "batch_size": cfg.EVAL.batch_size,
-            "sampler": {"name": "BatchSampler"},
         },
         ppsci.loss.MSELoss("mean"),
         metric={"MSE": ppsci.metric.MSE()},
@@ -229,12 +221,6 @@ def train(cfg: DictConfig):
 
 
 def evaluate(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info")
-
     # set model
     model = ppsci.arch.MLP(**cfg.MODEL)
 
@@ -273,7 +259,6 @@ def evaluate(cfg: DictConfig):
             "dataset": "NamedArrayDataset",
             "total_size": NPOINT_EVAL,
             "batch_size": cfg.EVAL.batch_size,
-            "sampler": {"name": "BatchSampler"},
         },
         ppsci.loss.MSELoss("mean"),
         metric={"MSE": ppsci.metric.MSE()},
diff --git a/examples/cylinder/2d_unsteady/transformer_physx/conf/enn.yaml b/examples/cylinder/2d_unsteady/transformer_physx/conf/enn.yaml
index 51590a3552..189570622c 100644
--- a/examples/cylinder/2d_unsteady/transformer_physx/conf/enn.yaml
+++ b/examples/cylinder/2d_unsteady/transformer_physx/conf/enn.yaml
@@ -14,6 +14,9 @@ hydra:
         - mode
         - output_dir
         - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/cylinder/2d_unsteady/transformer_physx/conf/transformer.yaml b/examples/cylinder/2d_unsteady/transformer_physx/conf/transformer.yaml
index 7fff3caa74..804042c38d 100644
--- a/examples/cylinder/2d_unsteady/transformer_physx/conf/transformer.yaml
+++ b/examples/cylinder/2d_unsteady/transformer_physx/conf/transformer.yaml
@@ -15,6 +15,9 @@ hydra:
         - output_dir
         - log_freq
         - EMBEDDING_MODEL_PATH
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/cylinder/2d_unsteady/transformer_physx/train_enn.py b/examples/cylinder/2d_unsteady/transformer_physx/train_enn.py
index 021f6d0d4b..1f213f30d6 100644
--- a/examples/cylinder/2d_unsteady/transformer_physx/train_enn.py
+++ b/examples/cylinder/2d_unsteady/transformer_physx/train_enn.py
@@ -18,7 +18,6 @@
 # This file is for step1: training a embedding model.
 # This file is based on PaddleScience/ppsci API.
 
-from os import path as osp
 
 import hydra
 import numpy as np
@@ -26,7 +25,6 @@
 from omegaconf import DictConfig
 
 import ppsci
-from ppsci.utils import logger
 
 
 def get_mean_std(data: np.ndarray, visc: np.ndarray):
@@ -50,11 +48,6 @@ def get_mean_std(data: np.ndarray, visc: np.ndarray):
 
 
 def train(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     weights = (10.0 * (cfg.TRAIN_BLOCK_SIZE - 1), 10.0 * cfg.TRAIN_BLOCK_SIZE)
     regularization_key = "k_matrix"
     # manually build constraint(s)
@@ -133,11 +126,6 @@ def train(cfg: DictConfig):
                 key: value for key, value in zip(cfg.MODEL.output_keys, weights)
             },
         },
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
         "batch_size": cfg.EVAL.batch_size,
         "num_workers": 4,
     }
@@ -170,11 +158,6 @@ def train(cfg: DictConfig):
 
 
 def evaluate(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     weights = (10.0 * (cfg.TRAIN_BLOCK_SIZE - 1), 10.0 * cfg.TRAIN_BLOCK_SIZE)
     regularization_key = "k_matrix"
     # manually build constraint(s)
@@ -238,11 +221,6 @@ def evaluate(cfg: DictConfig):
                 key: value for key, value in zip(cfg.MODEL.output_keys, weights)
            },
         },
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
         "batch_size": cfg.EVAL.batch_size,
         "num_workers": 4,
     }
diff --git a/examples/cylinder/2d_unsteady/transformer_physx/train_transformer.py b/examples/cylinder/2d_unsteady/transformer_physx/train_transformer.py
index aadfd4ce5b..9f9d8ae716 100644
--- a/examples/cylinder/2d_unsteady/transformer_physx/train_transformer.py
+++ b/examples/cylinder/2d_unsteady/transformer_physx/train_transformer.py
@@ -18,7 +18,6 @@
 # This file is for step2: training a transformer model, based on frozen pretrained embedding model.
 # This file is based on PaddleScience/ppsci API.
 
-from os import path as osp
 from typing import Dict
 
 import hydra
@@ -28,7 +27,6 @@
 
 import ppsci
 from ppsci.arch import base
-from ppsci.utils import logger
 from ppsci.utils import save_load
 
 
@@ -56,11 +54,6 @@ def __call__(self, x: Dict[str, paddle.Tensor]) -> Dict[str, paddle.Tensor]:
 
 
 def train(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     embedding_model = build_embedding_model(cfg.EMBEDDING_MODEL_PATH)
     output_transform = OutputTransform(embedding_model)
 
@@ -117,11 +110,6 @@ def train(cfg: DictConfig):
             "stride": 1024,
             "embedding_model": embedding_model,
         },
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
         "batch_size": cfg.EVAL.batch_size,
         "num_workers": 4,
     }
@@ -185,9 +173,6 @@ def train(cfg: DictConfig):
 
 
 def evaluate(cfg: DictConfig):
-    # directly evaluate pretrained model(optional)
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     embedding_model = build_embedding_model(cfg.EMBEDDING_MODEL_PATH)
     output_transform = OutputTransform(embedding_model)
 
@@ -205,11 +190,6 @@ def evaluate(cfg: DictConfig):
             "stride": 1024,
             "embedding_model": embedding_model,
         },
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
         "batch_size": cfg.EVAL.batch_size,
         "num_workers": 4,
     }
diff --git a/examples/darcy/conf/darcy2d.yaml b/examples/darcy/conf/darcy2d.yaml
index 5c15abbb77..1efbd856a9 100644
--- a/examples/darcy/conf/darcy2d.yaml
+++ b/examples/darcy/conf/darcy2d.yaml
@@ -14,6 +14,9 @@ hydra:
         - mode
         - output_dir
         - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/darcy/darcy2d.py b/examples/darcy/darcy2d.py
index 7f0a3646f8..e50cf01f55 100644
--- a/examples/darcy/darcy2d.py
+++ b/examples/darcy/darcy2d.py
@@ -25,11 +25,6 @@
 
 
 def train(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     # set model
     model = ppsci.arch.MLP(**cfg.MODEL)
 
@@ -94,7 +89,6 @@ def poisson_ref_compute_func(_in):
             "dataset": "NamedArrayDataset",
             "total_size": cfg.NPOINT_PDE,
             "batch_size": cfg.EVAL.batch_size.residual_validator,
-            "sampler": {"name": "BatchSampler"},
         },
         ppsci.loss.MSELoss("sum"),
         evenly=True,
@@ -199,11 +193,6 @@ def poisson_ref_compute_func(_in):
 
 
 def evaluate(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     # set model
     model = ppsci.arch.MLP(**cfg.MODEL)
 
@@ -231,7 +220,6 @@ def poisson_ref_compute_func(_in):
             "dataset": "NamedArrayDataset",
             "total_size": cfg.NPOINT_PDE,
             "batch_size": cfg.EVAL.batch_size.residual_validator,
-            "sampler": {"name": "BatchSampler"},
         },
         ppsci.loss.MSELoss("sum"),
         evenly=True,
diff --git a/examples/deepcfd/conf/deepcfd.yaml b/examples/deepcfd/conf/deepcfd.yaml
index e7a5a488c5..92ca6401f2 100644
--- a/examples/deepcfd/conf/deepcfd.yaml
+++ b/examples/deepcfd/conf/deepcfd.yaml
@@ -14,6 +14,9 @@ hydra:
         - mode
         - output_dir
         - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/deepcfd/deepcfd.py b/examples/deepcfd/deepcfd.py
index b205d0745f..a11628e88e 100644
--- a/examples/deepcfd/deepcfd.py
+++ b/examples/deepcfd/deepcfd.py
@@ -24,7 +24,6 @@
 from omegaconf import DictConfig
 
 import ppsci
-from ppsci.utils import logger
 
 
 def split_tensors(
@@ -199,11 +198,6 @@ def predict_and_save_plot(
 
 
 def train(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", os.path.join(cfg.output_dir, "train.log"), "info")
-
     # initialize datasets
     with open(cfg.DATAX_PATH, "rb") as file:
         x = pickle.load(file)
@@ -279,11 +273,6 @@ def loss_expr(
             "label": {"output": test_y},
         },
         "batch_size": cfg.EVAL.batch_size,
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
     }
 
     def metric_expr(
@@ -342,11 +331,6 @@ def metric_expr(
 
 
 def evaluate(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", os.path.join(cfg.output_dir, "eval.log"), "info")
-
     # initialize datasets
     with open(cfg.DATAX_PATH, "rb") as file:
         x = pickle.load(file)
@@ -396,11 +380,6 @@ def loss_expr(
             "label": {"output": test_y},
         },
         "batch_size": cfg.EVAL.batch_size,
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
     }
 
     def metric_expr(
diff --git a/examples/deephpms/burgers.py b/examples/deephpms/burgers.py
index 9e65b7ba8f..de1b528a57 100644
--- a/examples/deephpms/burgers.py
+++ b/examples/deephpms/burgers.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from os import path as osp
 
 import hydra
 import numpy as np
@@ -57,10 +56,6 @@ def boundary_loss_func(output_dict, *args):
 
 
 def train(cfg: DictConfig):
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     # initialize burgers boundaries
     t_lb = paddle.to_tensor(cfg.T_LB)
     t_ub = paddle.to_tensor(cfg.T_UB)
@@ -339,10 +334,6 @@ def transform_f_sol(_in):
 
 
 def evaluate(cfg: DictConfig):
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     # initialize burgers boundaries
     t_lb = paddle.to_tensor(cfg.T_LB)
     t_ub = paddle.to_tensor(cfg.T_UB)
diff --git a/examples/deephpms/conf/burgers.yaml b/examples/deephpms/conf/burgers.yaml
index 681a8ed41a..0053252ff9 100644
--- a/examples/deephpms/conf/burgers.yaml
+++ b/examples/deephpms/conf/burgers.yaml
@@ -14,6 +14,9 @@ hydra:
         - mode
         - output_dir
         - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/deephpms/conf/korteweg_de_vries.yaml b/examples/deephpms/conf/korteweg_de_vries.yaml
index 6933436a61..9c78dedbea 100644
--- a/examples/deephpms/conf/korteweg_de_vries.yaml
+++ b/examples/deephpms/conf/korteweg_de_vries.yaml
@@ -14,6 +14,9 @@ hydra:
         - mode
         - output_dir
         - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/deephpms/conf/kuramoto_sivashinsky.yaml b/examples/deephpms/conf/kuramoto_sivashinsky.yaml
index e03ded052b..41308852c4 100644
--- a/examples/deephpms/conf/kuramoto_sivashinsky.yaml
+++ b/examples/deephpms/conf/kuramoto_sivashinsky.yaml
@@ -14,6 +14,9 @@ hydra:
         - mode
         - output_dir
         - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/deephpms/conf/navier_stokes.yaml b/examples/deephpms/conf/navier_stokes.yaml
index fa6ba24e89..14d3e0ea5b 100644
--- a/examples/deephpms/conf/navier_stokes.yaml
+++ b/examples/deephpms/conf/navier_stokes.yaml
@@ -14,6 +14,9 @@ hydra:
         - mode
         - output_dir
         - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/deephpms/conf/schrodinger.yaml b/examples/deephpms/conf/schrodinger.yaml
index c650675071..5d0fb77da9 100644
--- a/examples/deephpms/conf/schrodinger.yaml
+++ b/examples/deephpms/conf/schrodinger.yaml
@@ -14,6 +14,9 @@ hydra:
         - mode
         - output_dir
         - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/deephpms/korteweg_de_vries.py b/examples/deephpms/korteweg_de_vries.py
index 9f97fbef91..8ab06e6092 100644
--- a/examples/deephpms/korteweg_de_vries.py
+++ b/examples/deephpms/korteweg_de_vries.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from os import path as osp
 
 import hydra
 import numpy as np
@@ -63,10 +62,6 @@ def train(cfg: DictConfig):
     # open FLAG for higher order differential operator when order >= 4
     paddle.framework.core.set_prim_eager_enabled(True)
 
-    ppsci.utils.misc.set_random_seed(42)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     # initialize boundaries
     t_lb = paddle.to_tensor(cfg.T_LB)
     t_ub = paddle.to_tensor(cfg.T_UB)
@@ -349,10 +344,6 @@ def evaluate(cfg: DictConfig):
     # open FLAG for higher order differential operator when order >= 4
     paddle.framework.core.set_prim_eager_enabled(True)
 
-    ppsci.utils.misc.set_random_seed(42)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     # initialize boundaries
     t_lb = paddle.to_tensor(cfg.T_LB)
     t_ub = paddle.to_tensor(cfg.T_UB)
diff --git a/examples/deephpms/kuramoto_sivashinsky.py b/examples/deephpms/kuramoto_sivashinsky.py
index af6a29115a..6d324eea75 100644
--- a/examples/deephpms/kuramoto_sivashinsky.py
+++ b/examples/deephpms/kuramoto_sivashinsky.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from os import path as osp
 
 import hydra
 import numpy as np
@@ -66,10 +65,6 @@ def train(cfg: DictConfig):
     # open FLAG for higher order differential operator when order >= 4
     paddle.framework.core.set_prim_eager_enabled(True)
 
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     # initialize boundaries
     t_lb = paddle.to_tensor(cfg.T_LB)
     t_ub = paddle.to_tensor(cfg.T_UB)
@@ -349,10 +344,6 @@ def evaluate(cfg: DictConfig):
     # open FLAG for higher order differential operator when order >= 4
     paddle.framework.core.set_prim_eager_enabled(True)
 
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     # initialize boundaries
     t_lb = paddle.to_tensor(cfg.T_LB)
     t_ub = paddle.to_tensor(cfg.T_UB)
diff --git a/examples/deephpms/navier_stokes.py b/examples/deephpms/navier_stokes.py
index a857f22469..9e0cf9e8ff 100644
--- a/examples/deephpms/navier_stokes.py
+++ b/examples/deephpms/navier_stokes.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from os import path as osp
 
 import hydra
 import numpy as np
@@ -43,10 +42,6 @@ def pde_l2_rel_func(output_dict, *args):
 
 
 def train(cfg: DictConfig):
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     # initialize boundaries
     # t, x, y
     lb = paddle.to_tensor(list(cfg.LB))
@@ -148,11 +143,6 @@ def transform_f(_in):
             },
         },
         "batch_size": cfg.TRAIN.batch_size.eval,
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
     }
 
     sup_validator_idn = ppsci.validate.SupervisedValidator(
@@ -229,11 +219,6 @@ def transform_f(_in):
             },
         },
         "batch_size": cfg.TRAIN.batch_size.eval,
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
     }
 
     sup_validator_pde = ppsci.validate.SupervisedValidator(
@@ -338,11 +323,6 @@ def transform_f(_in):
             },
         },
         "batch_size": cfg.TRAIN.batch_size.eval,
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
     }
 
     sup_validator_sol = ppsci.validate.SupervisedValidator(
@@ -374,10 +354,6 @@ def transform_f(_in):
 
 
 def evaluate(cfg: DictConfig):
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     # initialize boundaries
     # t, x, y
     lb = paddle.to_tensor(list(cfg.LB))
diff --git a/examples/deephpms/schrodinger.py b/examples/deephpms/schrodinger.py
index 84e070b9e6..36fcf2913b 100644
--- a/examples/deephpms/schrodinger.py
+++ b/examples/deephpms/schrodinger.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from os import path as osp
 
 import hydra
 import numpy as np
@@ -74,10 +73,6 @@ def sol_l2_rel_func(output_dict, label_dict):
 
 
 def train(cfg: DictConfig):
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     # initialize boundaries
     t_lb = paddle.to_tensor(cfg.T_LB)
     t_ub = paddle.to_tensor(np.pi / cfg.T_UB)
@@ -184,11 +179,6 @@ def transform_fg(_in):
             },
         },
         "batch_size": cfg.TRAIN.batch_size.eval,
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
     }
 
     sup_validator_idn = ppsci.validate.SupervisedValidator(
@@ -263,11 +253,6 @@ def transform_fg(_in):
             },
         },
         "batch_size": cfg.TRAIN.batch_size.eval,
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
     }
 
     sup_validator_pde = ppsci.validate.SupervisedValidator(
@@ -389,11 +374,6 @@ def transform_fg(_in):
             },
         },
         "batch_size": cfg.TRAIN.batch_size.eval,
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
     }
 
     sup_validator_sol = ppsci.validate.SupervisedValidator(
@@ -425,10 +405,6 @@ def transform_fg(_in):
 
 
 def evaluate(cfg: DictConfig):
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     # initialize boundaries
     t_lb = paddle.to_tensor(cfg.T_LB)
     t_ub = paddle.to_tensor(np.pi / cfg.T_UB)
diff --git a/examples/epnn/conf/epnn.yaml b/examples/epnn/conf/epnn.yaml
index 9a4d6c7471..ed7d8f0ba4 100644
--- a/examples/epnn/conf/epnn.yaml
+++ b/examples/epnn/conf/epnn.yaml
@@ -14,6 +14,9 @@ hydra:
         - mode
         - output_dir
         - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/epnn/epnn.py b/examples/epnn/epnn.py
index 0c161b1856..8a65663f13 100755
--- a/examples/epnn/epnn.py
+++ b/examples/epnn/epnn.py
@@ -16,23 +16,15 @@
 Reference: https://github.com/meghbali/ANNElastoplasticity
 """
 
-from os import path as osp
 
 import functions
 import hydra
 from omegaconf import DictConfig
 
 import ppsci
-from ppsci.utils import logger
 
 
 def train(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     (
         input_dict_train,
         label_dict_train,
@@ -123,11 +115,6 @@ def _transform_in_stress(_in):
 
 
 def evaluate(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     (
         input_dict_train,
         _,
diff --git a/examples/fourcastnet/conf/fourcastnet_finetune.yaml b/examples/fourcastnet/conf/fourcastnet_finetune.yaml
index caee7a752d..91e2372df4 100644
--- a/examples/fourcastnet/conf/fourcastnet_finetune.yaml
+++ b/examples/fourcastnet/conf/fourcastnet_finetune.yaml
@@ -14,6 +14,9 @@ hydra:
         - mode
         - output_dir
         - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/fourcastnet/conf/fourcastnet_precip.yaml b/examples/fourcastnet/conf/fourcastnet_precip.yaml
index cef0fee83c..6ac43b9244 100644
--- a/examples/fourcastnet/conf/fourcastnet_precip.yaml
+++ b/examples/fourcastnet/conf/fourcastnet_precip.yaml
@@ -14,6 +14,9 @@ hydra:
         - mode
         - output_dir
         - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/fourcastnet/conf/fourcastnet_pretrain.yaml b/examples/fourcastnet/conf/fourcastnet_pretrain.yaml
index dc64116399..902b785b7f 100644
--- a/examples/fourcastnet/conf/fourcastnet_pretrain.yaml
+++ b/examples/fourcastnet/conf/fourcastnet_pretrain.yaml
@@ -14,6 +14,9 @@ hydra:
         - mode
         - output_dir
         - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/fourcastnet/train_finetune.py b/examples/fourcastnet/train_finetune.py
index 3d6e2a7817..5bb34ad9c5 100644
--- a/examples/fourcastnet/train_finetune.py
+++ b/examples/fourcastnet/train_finetune.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 
 import functools
-from os import path as osp
 from typing import Tuple
 
 import h5py
@@ -24,7 +23,6 @@
 
 import examples.fourcastnet.utils as fourcast_utils
 import ppsci
-from ppsci.utils import logger
 
 
 def get_vis_data(
@@ -56,12 +54,6 @@ def get_vis_data(
 
 
 def train(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.set_random_seed(cfg.seed)
-
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info")
-
     # set training hyper-parameters
     output_keys = tuple(f"output_{i}" for i in range(cfg.TRAIN.num_timestamps))
 
@@ -123,11 +115,6 @@ def train(cfg: DictConfig):
             "num_label_timestamps": cfg.TRAIN.num_timestamps,
             "training": False,
         },
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
         "batch_size": cfg.EVAL.batch_size,
     }
 
@@ -193,11 +180,6 @@ def train(cfg: DictConfig):
 
 
 def evaluate(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info")
-
     # set testing hyper-parameters
     output_keys = tuple(f"output_{i}" for i in range(cfg.EVAL.num_timestamps))
 
@@ -238,11 +220,6 @@ def evaluate(cfg: DictConfig):
             "training": False,
             "stride": 8,
         },
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
         "batch_size": cfg.EVAL.batch_size,
     }
 
diff --git a/examples/fourcastnet/train_precip.py b/examples/fourcastnet/train_precip.py
index d7ea5ad0d8..069f10c227 100644
--- a/examples/fourcastnet/train_precip.py
+++ b/examples/fourcastnet/train_precip.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 
 import functools
-import os.path as osp
 from typing import Tuple
 
 import h5py
@@ -24,7 +23,6 @@
 
 import examples.fourcastnet.utils as fourcast_utils
 import ppsci
-from ppsci.utils import logger
 
 
 def get_vis_data(
@@ -58,11 +56,6 @@ def get_vis_data(
 
 
 def train(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", f"{cfg.output_dir}/train.log", "info")
-
     wind_data_mean, wind_data_std = fourcast_utils.get_mean_std(
         cfg.WIND_MEAN_PATH, cfg.WIND_STD_PATH, cfg.VARS_CHANNEL
     )
@@ -126,11 +119,6 @@ def train(cfg: DictConfig):
             "transforms": transforms,
             "training": False,
         },
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
         "batch_size": cfg.EVAL.batch_size,
     }
 
@@ -188,11 +176,6 @@ def train(cfg: DictConfig):
 
 
 def evaluate(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info")
-
     # set testing hyper-parameters
     output_keys = tuple(f"output_{i}" for i in range(cfg.EVAL.num_timestamps))
 
@@ -243,11 +226,6 @@ def evaluate(cfg: DictConfig):
             "transforms": transforms,
             "training": False,
         },
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
         "batch_size": cfg.EVAL.batch_size,
     }
     # set metirc
diff --git a/examples/fourcastnet/train_pretrain.py b/examples/fourcastnet/train_pretrain.py
index f6699612bd..bb24a2b602 100644
--- a/examples/fourcastnet/train_pretrain.py
+++ b/examples/fourcastnet/train_pretrain.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from os import path as osp
 
 import hydra
 import numpy as np
@@ -21,7 +20,6 @@
 
 import examples.fourcastnet.utils as fourcast_utils
 import ppsci
-from ppsci.utils import logger
 
 
 def get_data_stat(cfg: DictConfig):
@@ -38,11 +36,6 @@ def get_data_stat(cfg: DictConfig):
 
 
 def train(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info")
-
     data_mean, data_std = fourcast_utils.get_mean_std(
         cfg.DATA_MEAN_PATH, cfg.DATA_STD_PATH, cfg.VARS_CHANNEL
     )
@@ -119,11 +112,6 @@ def train(cfg: DictConfig):
             "transforms": transforms,
             "training": False,
         },
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
         "batch_size": cfg.EVAL.batch_size,
     }
 
@@ -182,11 +170,6 @@ def train(cfg: DictConfig):
 
 
 def evaluate(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info")
-
     data_mean, data_std = fourcast_utils.get_mean_std(
         cfg.DATA_MEAN_PATH, cfg.DATA_STD_PATH, cfg.VARS_CHANNEL
     )
@@ -214,11 +197,6 @@ def evaluate(cfg: DictConfig):
             "transforms": transforms,
             "training": False,
         },
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
         "batch_size": cfg.EVAL.batch_size,
     }
 
diff --git a/examples/fsi/viv.py b/examples/fsi/viv.py
index 1b27bf52c6..1b50f2d39a 100644
--- a/examples/fsi/viv.py
+++ b/examples/fsi/viv.py
@@ -66,11 +66,6 @@ def train(cfg: DictConfig):
             "label_keys": ("eta", "f"),
         },
         "batch_size": cfg.EVAL.batch_size,
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
     }
     eta_mse_validator = ppsci.validate.SupervisedValidator(
         valid_dataloader_cfg,
@@ -147,11 +142,6 @@ def evaluate(cfg: DictConfig):
             "label_keys": ("eta", "f"),
         },
         "batch_size": cfg.EVAL.batch_size,
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
     }
     eta_mse_validator = ppsci.validate.SupervisedValidator(
         valid_dataloader_cfg,
diff --git a/examples/gpinn/conf/poisson_1d.yaml b/examples/gpinn/conf/poisson_1d.yaml
index 827fad93a9..5e1a7e8d21 100644
--- a/examples/gpinn/conf/poisson_1d.yaml
+++ b/examples/gpinn/conf/poisson_1d.yaml
@@ -14,6 +14,9 @@ hydra:
         - mode
         - output_dir
         - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/gpinn/poisson_1d.py b/examples/gpinn/poisson_1d.py
index a8cb3da1b2..b7436e77fa 100644
--- a/examples/gpinn/poisson_1d.py
+++ b/examples/gpinn/poisson_1d.py
@@ -26,7 +26,6 @@
 
 import ppsci
 from ppsci.autodiff import jacobian
-from ppsci.utils import logger
 
 
 class gPINN1D(ppsci.equation.PDE):
@@ -55,11 +54,6 @@ def __init__(self, invar: str, outvar: str):
 
 
 def train(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     # set model
     model = ppsci.arch.MLP(**cfg.MODEL)
 
@@ -214,11 +208,6 @@ def du_x(x: np.ndarray) -> np.ndarray:
 
 
 def evaluate(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     # set model
     model = ppsci.arch.MLP(**cfg.MODEL)
 
diff --git a/examples/heat_exchanger/conf/heat_exchanger.yaml b/examples/heat_exchanger/conf/heat_exchanger.yaml
index d032230517..41b0c9f948 100644
--- a/examples/heat_exchanger/conf/heat_exchanger.yaml
+++ b/examples/heat_exchanger/conf/heat_exchanger.yaml
@@ -14,6 +14,9 @@ hydra:
         - mode
         - output_dir
         - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/heat_exchanger/heat_exchanger.py b/examples/heat_exchanger/heat_exchanger.py
index d5a0840606..e3859c01eb 100644
--- a/examples/heat_exchanger/heat_exchanger.py
+++ b/examples/heat_exchanger/heat_exchanger.py
@@ -13,8 +13,6 @@
 # limitations under the License.
 
-from os import path as osp
-
 import hydra
 import matplotlib.pyplot as plt
 import numpy as np
@@ -25,11 +23,6 @@
 
 
 def train(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     # set model
     model = ppsci.arch.HEDeepONets(**cfg.MODEL)
 
@@ -293,11 +286,6 @@ def train(cfg: DictConfig):
                 "label": test_bc_label,
             },
             "batch_size": cfg.NTIME,
-            "sampler": {
-                "name": "BatchSampler",
-                "drop_last": False,
-                "shuffle": False,
-            },
         },
         ppsci.loss.MSELoss("mean"),
         output_expr={"T_h": lambda out: out["T_h"] - cfg.T_hin},
@@ -312,11 +300,6 @@ def train(cfg: DictConfig):
                 "label": test_bc_label,
             },
             "batch_size": cfg.NTIME,
-            "sampler": {
-                "name": "BatchSampler",
-                "drop_last": False,
-                "shuffle": False,
-            },
         },
         ppsci.loss.MSELoss("mean"),
         output_expr={"T_h": lambda out: out["T_c"] - cfg.T_cin},
@@ -331,11 +314,6 @@ def train(cfg: DictConfig):
                 "label": test_interior_label,
             },
             "batch_size": cfg.NTIME,
-            "sampler": {
-                "name": "BatchSampler",
-                "drop_last": False,
-                "shuffle": False,
-            },
         },
         ppsci.loss.MSELoss("mean"),
         output_expr=equation["heat_exchanger"].equations,
@@ -435,11 +413,6 @@ def train(cfg: DictConfig):
 
 
 def evaluate(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     # set model
     model = ppsci.arch.HEDeepONets(**cfg.MODEL)
 
@@ -520,11 +493,6 @@ def evaluate(cfg: DictConfig):
                 "label": test_bc_label,
             },
             "batch_size": cfg.NTIME,
-            "sampler": {
-                "name": "BatchSampler",
-                "drop_last": False,
-                "shuffle": False,
-            },
         },
         ppsci.loss.MSELoss("mean"),
         output_expr={
@@ -541,11 +509,6 @@ def evaluate(cfg: DictConfig):
                 "label": test_bc_label,
             },
             "batch_size": cfg.NTIME,
-            "sampler": {
-                "name": "BatchSampler",
-                "drop_last": False,
-                "shuffle": False,
-            },
         },
         ppsci.loss.MSELoss("mean"),
         output_expr={
@@ -562,11 +525,6 @@ def evaluate(cfg: DictConfig):
                 "label": test_interior_label,
             },
             "batch_size": cfg.NTIME,
-            "sampler": {
-                "name": "BatchSampler",
-                "drop_last": False,
-                "shuffle": False,
-            },
         },
         ppsci.loss.MSELoss("mean"),
         output_expr=equation["heat_exchanger"].equations,
diff --git a/examples/heat_pinn/conf/heat_pinn.yaml b/examples/heat_pinn/conf/heat_pinn.yaml
index de3ff85d7d..d30987e1b3 100644
--- a/examples/heat_pinn/conf/heat_pinn.yaml
+++ b/examples/heat_pinn/conf/heat_pinn.yaml
@@ -14,6 +14,9 @@ hydra:
         - mode
         - output_dir
         - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/heat_pinn/heat_pinn.py b/examples/heat_pinn/heat_pinn.py
index 972095775e..d1400ca931 100644
--- a/examples/heat_pinn/heat_pinn.py
+++ b/examples/heat_pinn/heat_pinn.py
@@ -25,12 +25,6 @@
 
 
 def train(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-
-    # set output directory
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info")
-
     # set model
     model = ppsci.arch.MLP(**cfg.MODEL)
 
@@ -210,12 +204,6 @@ def train(cfg: DictConfig):
 
 
 def evaluate(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-
-    # set output directory
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info")
-
     # set model
     model = ppsci.arch.MLP(**cfg.MODEL)
 
diff --git a/examples/hpinns/conf/hpinns.yaml b/examples/hpinns/conf/hpinns.yaml
index 96f6005150..8176590751 100644
--- a/examples/hpinns/conf/hpinns.yaml
+++ b/examples/hpinns/conf/hpinns.yaml
@@ -14,6 +14,9 @@ hydra:
         - mode
         - output_dir
         - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/hpinns/holography.py b/examples/hpinns/holography.py
index 76174d42e3..e8e3d0dbd6 100644
--- a/examples/hpinns/holography.py
+++ b/examples/hpinns/holography.py
@@ -16,7 +16,6 @@
 This module is heavily adapted from https://github.com/lululxvi/hpinn
 """
 
-from os import path as osp
 
 import functions as func_module
 import hydra
@@ -35,10 +34,6 @@ def train(cfg: DictConfig):
     # open FLAG for higher order differential operator
     paddle.framework.core.set_prim_eager_enabled(True)
 
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     model_re = ppsci.arch.MLP(**cfg.MODEL.re_net)
     model_im = ppsci.arch.MLP(**cfg.MODEL.im_net)
     model_eps = ppsci.arch.MLP(**cfg.MODEL.eps_net)
@@ -296,10 +291,6 @@ def evaluate(cfg: DictConfig):
     # open FLAG for higher order differential operator
     paddle.framework.core.set_prim_eager_enabled(True)
 
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-    # initialize logger
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")
-
     model_re = ppsci.arch.MLP(**cfg.MODEL.re_net)
     model_im = ppsci.arch.MLP(**cfg.MODEL.im_net)
     model_eps = ppsci.arch.MLP(**cfg.MODEL.eps_net)
diff --git a/examples/ide/conf/volterra_ide.yaml b/examples/ide/conf/volterra_ide.yaml
index f424b196b5..bb449164e4 100644
--- a/examples/ide/conf/volterra_ide.yaml
+++ b/examples/ide/conf/volterra_ide.yaml
@@ -14,6 +14,9 @@ hydra:
         - mode
         - output_dir
         - log_freq
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/ide/volterra_ide.py b/examples/ide/volterra_ide.py
index 2f4c473ed4..8289b1c7a3 100644
--- a/examples/ide/volterra_ide.py
+++ b/examples/ide/volterra_ide.py
@@ -26,16 +26,9 @@
 
 import ppsci
 from ppsci.autodiff import jacobian
-from ppsci.utils import logger
 
 
 def train(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-
-    # set output directory
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info")
-
     # set model
     model = ppsci.arch.MLP(**cfg.MODEL)
 
@@ -195,12 +188,6 @@ def u_solution_func(in_):
 
 
 def evaluate(cfg: DictConfig):
-    # set random seed for reproducibility
-    ppsci.utils.misc.set_random_seed(cfg.seed)
-
-    # set output directory
-    logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info")
-
     # set model
     model = ppsci.arch.MLP(**cfg.MODEL)
 
diff --git a/examples/ldc/conf/ldc2d_steady_Re10.yaml b/examples/ldc/conf/ldc2d_steady_Re10.yaml
index 079d1e4424..78ddd5028e 100644
--- a/examples/ldc/conf/ldc2d_steady_Re10.yaml
+++ b/examples/ldc/conf/ldc2d_steady_Re10.yaml
@@ -13,6 +13,9 @@ hydra:
         - EVAL.pretrained_model_path
         - mode
         - output_dir
+  callbacks:
+    init_callback:
+      _target_: ppsci.utils.callbacks.InitCallback
   sweep:
     # output directory for multirun
     dir: ${hydra.run.dir}
diff --git a/examples/ldc/conf/ldc2d_unsteady_Re10.yaml b/examples/ldc/conf/ldc2d_unsteady_Re10.yaml
index 0414836af4..12248c757c 100644
--- a/examples/ldc/conf/ldc2d_unsteady_Re10.yaml
+++ b/examples/ldc/conf/ldc2d_unsteady_Re10.yaml
@@ -13,6 +13,9 @@ hydra:
EVAL.pretrained_model_path - mode - output_dir + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/ldc/ldc2d_steady_Re10.py b/examples/ldc/ldc2d_steady_Re10.py index 37cf32541e..e1ac1046b0 100644 --- a/examples/ldc/ldc2d_steady_Re10.py +++ b/examples/ldc/ldc2d_steady_Re10.py @@ -11,22 +11,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from os import path as osp import hydra import numpy as np from omegaconf import DictConfig import ppsci -from ppsci.utils import logger def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") - # set model model = ppsci.arch.MLP(**cfg.MODEL) @@ -168,11 +161,6 @@ def train(cfg: DictConfig): def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") - # set model model = ppsci.arch.MLP(**cfg.MODEL) diff --git a/examples/ldc/ldc2d_unsteady_Re10.py b/examples/ldc/ldc2d_unsteady_Re10.py index 3ed4ba7053..929d71f284 100644 --- a/examples/ldc/ldc2d_unsteady_Re10.py +++ b/examples/ldc/ldc2d_unsteady_Re10.py @@ -11,22 +11,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from os import path as osp import hydra import numpy as np from omegaconf import DictConfig import ppsci -from ppsci.utils import logger def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") - # set model model = ppsci.arch.MLP(**cfg.MODEL) @@ -210,11 +203,6 @@ def train(cfg: DictConfig): def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") - # set model model = ppsci.arch.MLP(**cfg.MODEL) diff --git a/examples/lorenz/conf/enn.yaml b/examples/lorenz/conf/enn.yaml index 0b1239d5ec..c37d6275be 100644 --- a/examples/lorenz/conf/enn.yaml +++ b/examples/lorenz/conf/enn.yaml @@ -14,6 +14,9 @@ hydra: - mode - output_dir - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/lorenz/conf/transformer.yaml b/examples/lorenz/conf/transformer.yaml index d17cf7af80..8d8117526b 100644 --- a/examples/lorenz/conf/transformer.yaml +++ b/examples/lorenz/conf/transformer.yaml @@ -15,6 +15,9 @@ hydra: - output_dir - log_freq - EMBEDDING_MODEL_PATH + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/lorenz/train_enn.py b/examples/lorenz/train_enn.py index d3450e2976..88ad584994 100644 --- a/examples/lorenz/train_enn.py +++ b/examples/lorenz/train_enn.py @@ -18,7 +18,6 @@ # This file is for step1: training a embedding model. # This file is based on PaddleScience/ppsci API. 
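[Note: the `InitCallback` registered under `hydra.callbacks` in the config hunks above is not itself shown in this patch series. As orientation only, a minimal sketch of such a Hydra job callback follows; the base class and `on_job_start` hook are Hydra's public callback API, while the method body and log-file naming are assumptions that simply replay the two calls being deleted from every example script, not the actual `ppsci/utils/callbacks.py` source.]

from os import path as osp

from hydra.experimental.callback import Callback
from omegaconf import DictConfig

import ppsci
from ppsci.utils import logger


class InitCallback(Callback):
    """Hypothetical sketch: run the shared per-example setup once per Hydra job."""

    def on_job_start(self, config: DictConfig, **kwargs) -> None:
        # set random seed for reproducibility (formerly duplicated in each example)
        ppsci.utils.misc.set_random_seed(config.seed)
        # initialize logger, writing to <output_dir>/<mode>.log as the old code did
        logger.init_logger(
            "ppsci", osp.join(config.output_dir, f"{config.mode}.log"), "info"
        )

[Registered via `_target_`, Hydra instantiates this class and invokes `on_job_start` before each job's main function runs, which is why the per-script boilerplate removed in the hunks below becomes redundant.]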
-from os import path as osp import hydra import numpy as np @@ -26,7 +25,6 @@ from omegaconf import DictConfig import ppsci -from ppsci.utils import logger def get_mean_std(data: np.ndarray): @@ -40,11 +38,6 @@ def get_mean_std(data: np.ndarray): def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - weights = (1.0 * (cfg.TRAIN_BLOCK_SIZE - 1), 1.0e4 * cfg.TRAIN_BLOCK_SIZE) regularization_key = "k_matrix" # manually build constraint(s) @@ -121,11 +114,6 @@ def train(cfg: DictConfig): key: value for key, value in zip(cfg.MODEL.output_keys, weights) }, }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 4, } @@ -157,11 +145,6 @@ def train(cfg: DictConfig): def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - weights = (1.0 * (cfg.TRAIN_BLOCK_SIZE - 1), 1.0e4 * cfg.TRAIN_BLOCK_SIZE) regularization_key = "k_matrix" # manually build constraint(s) @@ -223,11 +206,6 @@ def evaluate(cfg: DictConfig): key: value for key, value in zip(cfg.MODEL.output_keys, weights) }, }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 4, } diff --git a/examples/lorenz/train_transformer.py b/examples/lorenz/train_transformer.py index a68c404b9e..d889b7a767 100644 --- a/examples/lorenz/train_transformer.py +++ b/examples/lorenz/train_transformer.py @@ -18,7 +18,6 @@ # This file is for step2: training a transformer model, based on frozen pretrained embedding model. # This file is based on PaddleScience/ppsci API. 
-from os import path as osp from typing import Dict import hydra @@ -27,7 +26,6 @@ import ppsci from ppsci.arch import base -from ppsci.utils import logger from ppsci.utils import save_load @@ -57,9 +55,6 @@ def train(cfg: DictConfig): # valid time-series: 64 time-steps: 1024 block-size: 256 stride: 1024 # test time-series: 256 time-steps: 1024 # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") embedding_model = build_embedding_model(cfg.EMBEDDING_MODEL_PATH) output_transform = OutputTransform(embedding_model) @@ -117,11 +112,6 @@ def train(cfg: DictConfig): "stride": 1024, "embedding_model": embedding_model, }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 4, } @@ -176,9 +166,6 @@ def train(cfg: DictConfig): def evaluate(cfg: DictConfig): - # directly evaluate pretrained model(optional) - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - embedding_model = build_embedding_model(cfg.EMBEDDING_MODEL_PATH) output_transform = OutputTransform(embedding_model) @@ -196,11 +183,6 @@ def evaluate(cfg: DictConfig): "stride": 1024, "embedding_model": embedding_model, }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 4, } diff --git a/examples/nowcastnet/conf/nowcastnet.yaml b/examples/nowcastnet/conf/nowcastnet.yaml index 29ccec60e9..a972592d2d 100644 --- a/examples/nowcastnet/conf/nowcastnet.yaml +++ b/examples/nowcastnet/conf/nowcastnet.yaml @@ -14,6 +14,9 @@ hydra: - mode - output_dir - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/nowcastnet/nowcastnet.py b/examples/nowcastnet/nowcastnet.py index 6f3fbde79e..8db3587d10 100644 --- a/examples/nowcastnet/nowcastnet.py +++ b/examples/nowcastnet/nowcastnet.py @@ -8,7 +8,6 @@ from omegaconf import DictConfig import ppsci -from ppsci.utils import logger def train(cfg: DictConfig): @@ -16,11 +15,6 @@ def train(cfg: DictConfig): def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") - if cfg.CASE_TYPE == "large": dataset_path = cfg.LARGE_DATASET_PATH model_cfg = cfg.MODEL.large diff --git a/examples/nsfnet/VP_NSFNet1.py b/examples/nsfnet/VP_NSFNet1.py index 8e917ab6f2..7b06b69cd8 100644 --- a/examples/nsfnet/VP_NSFNet1.py +++ b/examples/nsfnet/VP_NSFNet1.py @@ -66,7 +66,6 @@ def generate_data(N_TRAIN, lam, seed): def train(cfg: DictConfig): OUTPUT_DIR = cfg.output_dir - logger.init_logger("ppsci", f"{OUTPUT_DIR}/train.log", "info") # set random seed for reproducibility SEED = cfg.seed @@ -110,11 +109,6 @@ def train(cfg: DictConfig): }, "batch_size": NB_TRAIN, "iters_per_epoch": ITERS_PER_EPOCH, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, } valida_dataloader_cfg = { @@ -125,11 +119,6 @@ def train(cfg: DictConfig): }, "total_size": u_star.shape[0], "batch_size": u_star.shape[0], - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, } geom = ppsci.geometry.PointCloud({"x": x_train, "y": y_train}, ("x", "y")) @@ -254,12 +243,8 @@ def train(cfg: DictConfig): 
def evaluate(cfg: DictConfig): - OUTPUT_DIR = cfg.output_dir - logger.init_logger("ppsci", f"{OUTPUT_DIR}/train.log", "info") - # set random seed for reproducibility SEED = cfg.seed - ppsci.utils.misc.set_random_seed(SEED) # set model model = ppsci.arch.MLP(**cfg.MODEL) @@ -291,11 +276,6 @@ def evaluate(cfg: DictConfig): }, "total_size": u_star.shape[0], "batch_size": u_star.shape[0], - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, } geom = ppsci.geometry.PointCloud({"x": x_train, "y": y_train}, ("x", "y")) diff --git a/examples/nsfnet/VP_NSFNet2.py b/examples/nsfnet/VP_NSFNet2.py index 94419e5735..05a204d983 100644 --- a/examples/nsfnet/VP_NSFNet2.py +++ b/examples/nsfnet/VP_NSFNet2.py @@ -113,11 +113,9 @@ def load_data(path, N_TRAIN, NB_TRAIN, N0_TRAIN): def train(cfg: DictConfig): OUTPUT_DIR = cfg.output_dir - logger.init_logger("ppsci", f"{OUTPUT_DIR}/train.log", "info") # set random seed for reproducibility SEED = cfg.seed - ppsci.utils.misc.set_random_seed(SEED) ITERS_PER_EPOCH = cfg.iters_per_epoch # set model @@ -162,11 +160,6 @@ def train(cfg: DictConfig): }, "batch_size": NB_TRAIN, "iters_per_epoch": ITERS_PER_EPOCH, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, } train_dataloader_cfg_0 = { @@ -177,11 +170,6 @@ def train(cfg: DictConfig): }, "batch_size": N0_TRAIN, "iters_per_epoch": ITERS_PER_EPOCH, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, } valida_dataloader_cfg = { @@ -192,11 +180,6 @@ def train(cfg: DictConfig): }, "total_size": u_star.shape[0], "batch_size": u_star.shape[0], - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, } geom = ppsci.geometry.PointCloud( @@ -300,11 +283,6 @@ def train(cfg: DictConfig): def evaluate(cfg: DictConfig): OUTPUT_DIR = cfg.output_dir - logger.init_logger("ppsci", f"{OUTPUT_DIR}/train.log", "info") - - # set random seed for reproducibility - SEED = cfg.seed - ppsci.utils.misc.set_random_seed(SEED) # set model model = ppsci.arch.MLP(**cfg.MODEL) @@ -370,11 +348,6 @@ def evaluate(cfg: DictConfig): }, "total_size": u_star.shape[0], "batch_size": u_star.shape[0], - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, } geom = ppsci.geometry.PointCloud( diff --git a/examples/nsfnet/VP_NSFNet3.py b/examples/nsfnet/VP_NSFNet3.py index d65995bfe3..d790c49123 100644 --- a/examples/nsfnet/VP_NSFNet3.py +++ b/examples/nsfnet/VP_NSFNet3.py @@ -164,7 +164,6 @@ def main(cfg: DictConfig): def train(cfg: DictConfig): OUTPUT_DIR = cfg.output_dir - logger.init_logger("ppsci", f"{OUTPUT_DIR}/train.log", "info") # set random seed for reproducibility SEED = cfg.seed @@ -222,11 +221,6 @@ def train(cfg: DictConfig): }, "batch_size": NB_TRAIN, "iters_per_epoch": ITERS_PER_EPOCH, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, } train_dataloader_cfg_0 = { @@ -237,11 +231,6 @@ def train(cfg: DictConfig): }, "batch_size": N0_TRAIN, "iters_per_epoch": ITERS_PER_EPOCH, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, } valida_dataloader_cfg = { @@ -252,11 +241,6 @@ def train(cfg: DictConfig): }, "total_size": u_star.shape[0], "batch_size": u_star.shape[0], - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, } geom = ppsci.geometry.PointCloud( {"x": x_train, "y": y_train, "z": z_train, "t": t_train}, ("x", "y", "z", "t") @@ -358,7 +342,6 @@ def train(cfg: 
DictConfig): def evaluate(cfg: DictConfig): OUTPUT_DIR = cfg.output_dir - logger.init_logger("ppsci", f"{OUTPUT_DIR}/train.log", "info") # set random seed for reproducibility SEED = cfg.seed @@ -400,11 +383,6 @@ def evaluate(cfg: DictConfig): }, "total_size": u_star.shape[0], "batch_size": u_star.shape[0], - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, } geom = ppsci.geometry.PointCloud( {"x": x_train, "y": y_train, "z": z_train, "t": t_train}, ("x", "y", "z", "t") diff --git a/examples/operator_learning/conf/deeponet.yaml b/examples/operator_learning/conf/deeponet.yaml index 5fec01b547..bf81184375 100644 --- a/examples/operator_learning/conf/deeponet.yaml +++ b/examples/operator_learning/conf/deeponet.yaml @@ -14,6 +14,9 @@ hydra: - mode - output_dir - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/operator_learning/deeponet.py b/examples/operator_learning/deeponet.py index ab8933b294..51328492ab 100644 --- a/examples/operator_learning/deeponet.py +++ b/examples/operator_learning/deeponet.py @@ -3,7 +3,6 @@ """ import os -from os import path as osp from typing import Callable from typing import Tuple @@ -14,15 +13,9 @@ from omegaconf import DictConfig import ppsci -from ppsci.utils import logger def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - # set model model = ppsci.arch.DeepONet(**cfg.MODEL) @@ -152,11 +145,6 @@ def generate_y_u_G_ref( def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - # set model model = ppsci.arch.DeepONet(**cfg.MODEL) diff --git a/examples/phycrnet/conf/burgers_equations.yaml b/examples/phycrnet/conf/burgers_equations.yaml index 1dee3a0f9f..700419936a 100644 --- a/examples/phycrnet/conf/burgers_equations.yaml +++ b/examples/phycrnet/conf/burgers_equations.yaml @@ -14,6 +14,9 @@ hydra: - mode - output_dir - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/phycrnet/conf/fitzhugh_nagumo_RD_equation.yaml b/examples/phycrnet/conf/fitzhugh_nagumo_RD_equation.yaml index 8ed99d8605..dd7a1d54d8 100644 --- a/examples/phycrnet/conf/fitzhugh_nagumo_RD_equation.yaml +++ b/examples/phycrnet/conf/fitzhugh_nagumo_RD_equation.yaml @@ -14,6 +14,9 @@ hydra: - mode - output_dir - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/phycrnet/conf/lambda_omega_RD_equation.yaml b/examples/phycrnet/conf/lambda_omega_RD_equation.yaml index 19c8eb7718..125a4e289c 100644 --- a/examples/phycrnet/conf/lambda_omega_RD_equation.yaml +++ b/examples/phycrnet/conf/lambda_omega_RD_equation.yaml @@ -14,6 +14,9 @@ hydra: - mode - output_dir - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/phycrnet/main.py b/examples/phycrnet/main.py index ef0f8fab81..21821d0865 100644 --- a/examples/phycrnet/main.py +++ b/examples/phycrnet/main.py @@ -2,7 +2,6 @@ 
PhyCRNet for solving spatiotemporal PDEs Reference: https://github.com/isds-neu/PhyCRNet/ """ -from os import path as osp import functions import hydra @@ -11,15 +10,9 @@ from omegaconf import DictConfig import ppsci -from ppsci.utils import logger def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - # set initial states for convlstm NUM_CONVLSTM = cfg.num_convlstm (h0, c0) = (paddle.randn((1, 128, 16, 16)), paddle.randn((1, 128, 16, 16))) @@ -132,11 +125,6 @@ def _transform_out(_in, _out): def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - # set initial states for convlstm NUM_CONVLSTM = cfg.num_convlstm (h0, c0) = (paddle.randn((1, 128, 16, 16)), paddle.randn((1, 128, 16, 16))) diff --git a/examples/phylstm/conf/phylstm2.yaml b/examples/phylstm/conf/phylstm2.yaml index 3fc184d715..ea4285b156 100644 --- a/examples/phylstm/conf/phylstm2.yaml +++ b/examples/phylstm/conf/phylstm2.yaml @@ -14,6 +14,9 @@ hydra: - mode - output_dir - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/phylstm/conf/phylstm3.yaml b/examples/phylstm/conf/phylstm3.yaml index 0be68339b4..2fcebebffe 100644 --- a/examples/phylstm/conf/phylstm3.yaml +++ b/examples/phylstm/conf/phylstm3.yaml @@ -14,6 +14,9 @@ hydra: - mode - output_dir - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/phylstm/phylstm2.py b/examples/phylstm/phylstm2.py index 38af11fca5..aecb4e0b93 100755 --- a/examples/phylstm/phylstm2.py +++ b/examples/phylstm/phylstm2.py @@ -16,7 +16,6 @@ Reference: https://github.com/zhry10/PhyLSTM.git """ -from os import path as osp import functions import hydra @@ -25,15 +24,9 @@ from omegaconf import DictConfig import ppsci -from ppsci.utils import logger def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") - mat = scipy.io.loadmat(cfg.DATA_FILE_PATH) ag_data = mat["input_tf"] # ag, ad, av u_data = mat["target_X_tf"] @@ -151,11 +144,6 @@ def train(cfg: DictConfig): "input": input_dict_val, "label": label_dict_val, }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": 1, "num_workers": 0, }, @@ -198,11 +186,6 @@ def train(cfg: DictConfig): def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") - mat = scipy.io.loadmat(cfg.DATA_FILE_PATH) ag_data = mat["input_tf"] # ag, ad, av u_data = mat["target_X_tf"] @@ -292,11 +275,6 @@ def evaluate(cfg: DictConfig): "input": input_dict_val, "label": label_dict_val, }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": 1, "num_workers": 0, }, diff --git a/examples/phylstm/phylstm3.py b/examples/phylstm/phylstm3.py index 071ecbeedb..6e5c4130be 100755 --- a/examples/phylstm/phylstm3.py +++ 
b/examples/phylstm/phylstm3.py @@ -16,7 +16,6 @@ Reference: https://github.com/zhry10/PhyLSTM.git """ -from os import path as osp import functions import hydra @@ -25,15 +24,9 @@ from omegaconf import DictConfig import ppsci -from ppsci.utils import logger def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") - mat = scipy.io.loadmat(cfg.DATA_FILE_PATH) t = mat["time"] dt = 0.02 @@ -129,11 +122,6 @@ def train(cfg: DictConfig): "input": input_dict_train, "label": label_dict_train, }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": 1, "num_workers": 0, }, @@ -159,11 +147,6 @@ def train(cfg: DictConfig): "input": input_dict_val, "label": label_dict_val, }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": 1, "num_workers": 0, }, @@ -208,11 +191,6 @@ def train(cfg: DictConfig): def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") - mat = scipy.io.loadmat(cfg.DATA_FILE_PATH) t = mat["time"] dt = 0.02 @@ -308,11 +286,6 @@ def evaluate(cfg: DictConfig): "input": input_dict_val, "label": label_dict_val, }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": 1, "num_workers": 0, }, diff --git a/examples/pipe/poiseuille_flow.py b/examples/pipe/poiseuille_flow.py index f9481bb2a7..54d20e12dd 100644 --- a/examples/pipe/poiseuille_flow.py +++ b/examples/pipe/poiseuille_flow.py @@ -128,11 +128,6 @@ def output_trans_p(input, out): "num_workers": 1, "batch_size": cfg.TRAIN.batch_size.pde_constraint, "iters_per_epoch": ITERS_PER_EPOCH, - "sampler": { - "name": "BatchSampler", - "shuffle": False, - "drop_last": False, - }, }, loss=ppsci.loss.MSELoss("mean"), evenly=True, @@ -304,11 +299,6 @@ def forward(self, output_dict, label_dict): "weight": weight_dict_KL, } eval_cfg = { - "sampler": { - "name": "BatchSampler", - "shuffle": False, - "drop_last": False, - }, "batch_size": 2000, } eval_cfg["dataset"] = dataset_vel diff --git a/examples/rossler/conf/enn.yaml b/examples/rossler/conf/enn.yaml index 0b657e4248..1bad85dfd6 100644 --- a/examples/rossler/conf/enn.yaml +++ b/examples/rossler/conf/enn.yaml @@ -14,6 +14,9 @@ hydra: - mode - output_dir - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/rossler/conf/transformer.yaml b/examples/rossler/conf/transformer.yaml index 0dab2ec059..a8c8682c00 100644 --- a/examples/rossler/conf/transformer.yaml +++ b/examples/rossler/conf/transformer.yaml @@ -15,6 +15,9 @@ hydra: - output_dir - log_freq - EMBEDDING_MODEL_PATH + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/rossler/train_enn.py b/examples/rossler/train_enn.py index c26dcb89ec..b2ba6ed566 100644 --- a/examples/rossler/train_enn.py +++ b/examples/rossler/train_enn.py @@ -18,7 +18,6 @@ # This file is for step1: training a embedding model. # This file is based on PaddleScience/ppsci API. 
-from os import path as osp import hydra import numpy as np @@ -26,7 +25,6 @@ from omegaconf import DictConfig import ppsci -from ppsci.utils import logger def get_mean_std(data: np.ndarray): @@ -44,11 +42,6 @@ def get_mean_std(data: np.ndarray): def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - weights = (1.0 * (cfg.TRAIN_BLOCK_SIZE - 1), 1.0e3 * cfg.TRAIN_BLOCK_SIZE) regularization_key = "k_matrix" # manually build constraint(s) @@ -123,11 +116,6 @@ def train(cfg: DictConfig): key: value for key, value in zip(cfg.MODEL.output_keys, weights) }, }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 4, } @@ -158,11 +146,6 @@ def train(cfg: DictConfig): def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - weights = (1.0 * (cfg.TRAIN_BLOCK_SIZE - 1), 1.0e3 * cfg.TRAIN_BLOCK_SIZE) regularization_key = "k_matrix" # manually build constraint(s) @@ -222,11 +205,6 @@ def evaluate(cfg: DictConfig): key: value for key, value in zip(cfg.MODEL.output_keys, weights) }, }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 4, } diff --git a/examples/rossler/train_transformer.py b/examples/rossler/train_transformer.py index a58b8b8d28..26453e4a32 100644 --- a/examples/rossler/train_transformer.py +++ b/examples/rossler/train_transformer.py @@ -18,7 +18,6 @@ # This file is for step2: training a transformer model, based on frozen pretrained embedding model. # This file is based on PaddleScience/ppsci API. 
-from os import path as osp from typing import Dict import hydra @@ -27,7 +26,6 @@ import ppsci from ppsci.arch import base -from ppsci.utils import logger from ppsci.utils import save_load @@ -53,11 +51,6 @@ def __call__(self, x: Dict[str, paddle.Tensor]): def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - embedding_model = build_embedding_model(cfg.EMBEDDING_MODEL_PATH) output_transform = OutputTransform(embedding_model) @@ -114,11 +107,6 @@ def train(cfg: DictConfig): "stride": 1024, "embedding_model": embedding_model, }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 4, } @@ -173,11 +161,6 @@ def train(cfg: DictConfig): def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - embedding_model = build_embedding_model(cfg.EMBEDDING_MODEL_PATH) output_transform = OutputTransform(embedding_model) @@ -195,11 +178,6 @@ def evaluate(cfg: DictConfig): "stride": 1024, "embedding_model": embedding_model, }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 4, } diff --git a/examples/shock_wave/conf/shock_wave_Ma0.728.yaml b/examples/shock_wave/conf/shock_wave_Ma0.728.yaml index 805f7840da..f24273197f 100644 --- a/examples/shock_wave/conf/shock_wave_Ma0.728.yaml +++ b/examples/shock_wave/conf/shock_wave_Ma0.728.yaml @@ -14,6 +14,9 @@ hydra: - mode - output_dir - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/shock_wave/conf/shock_wave_Ma2.0.yaml b/examples/shock_wave/conf/shock_wave_Ma2.0.yaml index 75d2b72833..5a785a2b62 100644 --- a/examples/shock_wave/conf/shock_wave_Ma2.0.yaml +++ b/examples/shock_wave/conf/shock_wave_Ma2.0.yaml @@ -14,6 +14,9 @@ hydra: - mode - output_dir - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/shock_wave/shock_wave.py b/examples/shock_wave/shock_wave.py index 0f59fd0ca4..8523878768 100644 --- a/examples/shock_wave/shock_wave.py +++ b/examples/shock_wave/shock_wave.py @@ -24,7 +24,6 @@ import ppsci from ppsci import equation from ppsci.autodiff import jacobian -from ppsci.utils import logger from ppsci.utils import misc @@ -245,12 +244,6 @@ def generate_bc_left_points( def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") - # set model model = ppsci.arch.MLP(**cfg.MODEL) @@ -426,12 +419,6 @@ def train(cfg: DictConfig): def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") - # set model model = ppsci.arch.MLP(**cfg.MODEL) diff --git a/examples/tempoGAN/conf/tempogan.yaml b/examples/tempoGAN/conf/tempogan.yaml index f0b706b6f1..c415f9a727 100644 --- a/examples/tempoGAN/conf/tempogan.yaml +++ 
b/examples/tempoGAN/conf/tempogan.yaml @@ -14,6 +14,9 @@ hydra: - mode - output_dir - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/tempoGAN/tempoGAN.py b/examples/tempoGAN/tempoGAN.py index 511243bd9b..c59d9eba72 100644 --- a/examples/tempoGAN/tempoGAN.py +++ b/examples/tempoGAN/tempoGAN.py @@ -35,10 +35,6 @@ def train(cfg: DictConfig): - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") - gen_funcs = func_module.GenFuncs( cfg.WEIGHT_GEN, (cfg.WEIGHT_GEN_LAYER if cfg.USE_SPATIALDISC else None) ) @@ -112,11 +108,6 @@ def train(cfg: DictConfig): ), }, "batch_size": cfg.TRAIN.batch_size.sup_constraint, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, }, ppsci.loss.FunctionalLoss(gen_funcs.loss_func_gen), { @@ -144,11 +135,6 @@ def train(cfg: DictConfig): ), }, "batch_size": int(cfg.TRAIN.batch_size.sup_constraint // 3), - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, }, ppsci.loss.FunctionalLoss(gen_funcs.loss_func_gen_tempo), { @@ -189,11 +175,6 @@ def train(cfg: DictConfig): ), }, "batch_size": cfg.TRAIN.batch_size.sup_constraint, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, }, ppsci.loss.FunctionalLoss(disc_funcs.loss_func), name="sup_constraint_disc", @@ -230,11 +211,6 @@ def train(cfg: DictConfig): ), }, "batch_size": int(cfg.TRAIN.batch_size.sup_constraint // 3), - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, }, ppsci.loss.FunctionalLoss(disc_funcs.loss_func_tempo), name="sup_constraint_disc_tempo", @@ -329,10 +305,6 @@ def evaluate(cfg: DictConfig): os.makedirs(osp.join(cfg.output_dir, "eval_outs"), exist_ok=True) - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") - gen_funcs = func_module.GenFuncs(cfg.WEIGHT_GEN, None) # load dataset @@ -357,11 +329,6 @@ def evaluate(cfg: DictConfig): }, "label": {"density_high": dataset_valid["density_high"]}, }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": 1, } sup_validator = ppsci.validate.SupervisedValidator( diff --git a/examples/topopt/conf/topopt.yaml b/examples/topopt/conf/topopt.yaml index 05130f7fd8..872393b0e9 100644 --- a/examples/topopt/conf/topopt.yaml +++ b/examples/topopt/conf/topopt.yaml @@ -21,6 +21,9 @@ hydra: - mode - vol_coeff - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback sweep: # output directory for multirun dir: ${hydra.run.dir} diff --git a/examples/topopt/topopt.py b/examples/topopt/topopt.py index 3346f9f2d3..ea20050f9e 100644 --- a/examples/topopt/topopt.py +++ b/examples/topopt/topopt.py @@ -29,11 +29,6 @@ def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - # 4 training cases parameters LEARNING_RATE = cfg.TRAIN.learning_rate / (1 + cfg.TRAIN.epochs // 15) ITERS_PER_EPOCH = int(cfg.n_samples * cfg.train_test_ratio / cfg.TRAIN.batch_size) @@ -114,11 +109,6 @@ def train(cfg: DictConfig): # evaluate 4 models def evaluate(cfg: DictConfig): - # set random seed for reproducibility - 
ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - # fixed iteration stop times for evaluation iterations_stop_times = range(5, 85, 5) model = TopOptNN(**cfg.MODEL) From 1fefb1062690a49ec3b7d868669448238d534b32 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Thu, 25 Apr 2024 11:14:39 +0000 Subject: [PATCH 02/20] update code --- docs/zh/examples/amgnet.md | 8 ++-- examples/NLS-MB/NLS-MB_optical_rogue_wave.py | 3 -- examples/NLS-MB/NLS-MB_optical_soliton.py | 3 -- examples/allen_cahn/allen_cahn_causal.py | 1 - examples/allen_cahn/allen_cahn_plain.py | 1 - examples/amgnet/amgnet_airfoil.py | 1 - examples/amgnet/amgnet_cylinder.py | 1 - examples/aneurysm/aneurysm.py | 3 -- examples/aneurysm/aneurysm_flow.py | 1 - examples/biharmonic2d/biharmonic2d.py | 4 -- examples/bracket/bracket.py | 3 -- examples/bubble/bubble.py | 2 - examples/cfdgcn/cfdgcn.py | 1 - examples/control_arm/forward_analysis.py | 4 -- examples/control_arm/inverse_parameter.py | 3 -- .../2d_unsteady/cylinder2d_unsteady_Re100.py | 2 - examples/darcy/darcy2d.py | 3 -- examples/deepcfd/deepcfd.py | 2 - examples/euler_beam/euler_beam.py | 4 -- examples/fourcastnet/train_pretrain.py | 2 - examples/fpde/fractional_poisson_2d.py | 1 - examples/fsi/viv.py | 1 - examples/gpinn/poisson_1d.py | 2 - examples/heat_exchanger/heat_exchanger.py | 2 - examples/heat_pinn/heat_pinn.py | 3 -- examples/hpinns/holography.py | 1 - examples/ide/volterra_ide.py | 2 - examples/laplace/laplace2d.py | 3 -- examples/ldc/ldc2d_steady_Re10.py | 2 - examples/ldc/ldc2d_unsteady_Re10.py | 2 - examples/nsfnet/VP_NSFNet1.py | 16 ------- examples/nsfnet/VP_NSFNet2.py | 38 --------------- examples/nsfnet/VP_NSFNet3.py | 25 ---------- examples/nsfnet/VP_NSFNet4.py | 2 - examples/operator_learning/deeponet.py | 1 - examples/phylstm/phylstm2.py | 2 - examples/phylstm/phylstm3.py | 2 - examples/shock_wave/shock_wave.py | 2 - ppsci/solver/solver.py | 48 +++++++++++++++++++ 39 files changed, 52 insertions(+), 155 deletions(-) diff --git a/docs/zh/examples/amgnet.md b/docs/zh/examples/amgnet.md index 43bdae068e..f56929b875 100644 --- a/docs/zh/examples/amgnet.md +++ b/docs/zh/examples/amgnet.md @@ -103,17 +103,17 @@ unzip data.zip === "airfoil" - ``` py linenums="61" + ``` py linenums="55" --8<-- - examples/amgnet/amgnet_airfoil.py:61:62 + examples/amgnet/amgnet_airfoil.py:55:56 --8<-- ``` === "cylinder" - ``` py linenums="61" + ``` py linenums="55" --8<-- - examples/amgnet/amgnet_cylinder.py:61:62 + examples/amgnet/amgnet_cylinder.py:55:56 --8<-- ``` diff --git a/examples/NLS-MB/NLS-MB_optical_rogue_wave.py b/examples/NLS-MB/NLS-MB_optical_rogue_wave.py index d6da2c5c54..f1d7c52557 100644 --- a/examples/NLS-MB/NLS-MB_optical_rogue_wave.py +++ b/examples/NLS-MB/NLS-MB_optical_rogue_wave.py @@ -240,7 +240,6 @@ def train(cfg: DictConfig): eval_during_train=cfg.TRAIN.eval_during_train, eval_freq=cfg.TRAIN.eval_freq, equation=equation, - geom=geom, validator=validator, ) # train model @@ -266,7 +265,6 @@ def train(cfg: DictConfig): eval_during_train=cfg.TRAIN.lbfgs.eval_during_train, eval_freq=cfg.TRAIN.lbfgs.eval_freq, equation=equation, - geom=geom, validator=validator, ) # train model @@ -344,7 +342,6 @@ def evaluate(cfg: DictConfig): output_dir=cfg.output_dir, eval_freq=cfg.TRAIN.eval_freq, equation=equation, - geom=geom, validator=validator, pretrained_model_path=cfg.EVAL.pretrained_model_path, ) diff --git a/examples/NLS-MB/NLS-MB_optical_soliton.py 
b/examples/NLS-MB/NLS-MB_optical_soliton.py index 0819e51662..dd5fe9a17b 100644 --- a/examples/NLS-MB/NLS-MB_optical_soliton.py +++ b/examples/NLS-MB/NLS-MB_optical_soliton.py @@ -217,7 +217,6 @@ def train(cfg: DictConfig): eval_during_train=cfg.TRAIN.eval_during_train, eval_freq=cfg.TRAIN.eval_freq, equation=equation, - geom=geom, validator=validator, ) # train model @@ -243,7 +242,6 @@ def train(cfg: DictConfig): eval_during_train=cfg.TRAIN.lbfgs.eval_during_train, eval_freq=cfg.TRAIN.lbfgs.eval_freq, equation=equation, - geom=geom, validator=validator, ) # train model @@ -321,7 +319,6 @@ def evaluate(cfg: DictConfig): output_dir=cfg.output_dir, eval_freq=cfg.TRAIN.eval_freq, equation=equation, - geom=geom, validator=validator, pretrained_model_path=cfg.EVAL.pretrained_model_path, ) diff --git a/examples/allen_cahn/allen_cahn_causal.py b/examples/allen_cahn/allen_cahn_causal.py index 89840177a4..4dfb773d0a 100644 --- a/examples/allen_cahn/allen_cahn_causal.py +++ b/examples/allen_cahn/allen_cahn_causal.py @@ -167,7 +167,6 @@ def gen_label_batch(input_batch): log_freq=cfg.log_freq, eval_during_train=True, eval_freq=cfg.TRAIN.eval_freq, - seed=cfg.seed, equation=equation, validator=validator, pretrained_model_path=cfg.TRAIN.pretrained_model_path, diff --git a/examples/allen_cahn/allen_cahn_plain.py b/examples/allen_cahn/allen_cahn_plain.py index e1cbcc10d3..ed9d6070a7 100644 --- a/examples/allen_cahn/allen_cahn_plain.py +++ b/examples/allen_cahn/allen_cahn_plain.py @@ -165,7 +165,6 @@ def gen_label_batch(input_batch): log_freq=cfg.log_freq, eval_during_train=True, eval_freq=cfg.TRAIN.eval_freq, - seed=cfg.seed, equation=equation, validator=validator, pretrained_model_path=cfg.TRAIN.pretrained_model_path, diff --git a/examples/amgnet/amgnet_airfoil.py b/examples/amgnet/amgnet_airfoil.py index e1bbccccf2..b25c7bee25 100644 --- a/examples/amgnet/amgnet_airfoil.py +++ b/examples/amgnet/amgnet_airfoil.py @@ -167,7 +167,6 @@ def evaluate(cfg: DictConfig): model, output_dir=cfg.output_dir, log_freq=cfg.log_freq, - seed=cfg.seed, validator=validator, pretrained_model_path=cfg.EVAL.pretrained_model_path, eval_with_no_grad=cfg.EVAL.eval_with_no_grad, diff --git a/examples/amgnet/amgnet_cylinder.py b/examples/amgnet/amgnet_cylinder.py index 05c486fd96..ff2140eaa3 100644 --- a/examples/amgnet/amgnet_cylinder.py +++ b/examples/amgnet/amgnet_cylinder.py @@ -167,7 +167,6 @@ def evaluate(cfg: DictConfig): model, output_dir=cfg.output_dir, log_freq=cfg.log_freq, - seed=cfg.seed, validator=validator, pretrained_model_path=cfg.EVAL.pretrained_model_path, eval_with_no_grad=cfg.EVAL.eval_with_no_grad, diff --git a/examples/aneurysm/aneurysm.py b/examples/aneurysm/aneurysm.py index c67ba7dd42..2a19e4efb2 100644 --- a/examples/aneurysm/aneurysm.py +++ b/examples/aneurysm/aneurysm.py @@ -231,9 +231,7 @@ def inlet_w_ref_func(_in): log_freq=cfg.log_freq, eval_during_train=True, eval_freq=cfg.TRAIN.eval_freq, - seed=cfg.seed, equation=equation, - geom=geom, validator=validator, visualizer=visualizer, pretrained_model_path=cfg.TRAIN.pretrained_model_path, @@ -321,7 +319,6 @@ def evaluate(cfg: DictConfig): model, output_dir=cfg.output_dir, log_freq=cfg.log_freq, - seed=cfg.seed, validator=validator, visualizer=visualizer, pretrained_model_path=cfg.EVAL.pretrained_model_path, diff --git a/examples/aneurysm/aneurysm_flow.py b/examples/aneurysm/aneurysm_flow.py index 8c788c8072..37a0c3e975 100644 --- a/examples/aneurysm/aneurysm_flow.py +++ b/examples/aneurysm/aneurysm_flow.py @@ -278,7 +278,6 @@ def 
output_transform_p(self, in_, out): model, output_dir=cfg.output_dir, log_freq=cfg.log_freq, - seed=cfg.seed, pretrained_model_path=cfg.EVAL.pretrained_model_path, eval_with_no_grad=cfg.EVAL.eval_with_no_grad, ) diff --git a/examples/biharmonic2d/biharmonic2d.py b/examples/biharmonic2d/biharmonic2d.py index a5d0d57385..20a1938ddc 100644 --- a/examples/biharmonic2d/biharmonic2d.py +++ b/examples/biharmonic2d/biharmonic2d.py @@ -231,9 +231,7 @@ def train(cfg: DictConfig): cfg.TRAIN.iters_per_epoch, save_freq=cfg.TRAIN.save_freq, log_freq=cfg.log_freq, - seed=cfg.seed, equation=equation, - geom=geom, checkpoint_path=cfg.TRAIN.checkpoint_path, pretrained_model_path=cfg.TRAIN.pretrained_model_path, ) @@ -252,9 +250,7 @@ def train(cfg: DictConfig): 1, save_freq=cfg.TRAIN.save_freq, log_freq=cfg.log_freq, - seed=cfg.seed, equation=equation, - geom=geom, checkpoint_path=cfg.TRAIN.checkpoint_path, pretrained_model_path=cfg.TRAIN.pretrained_model_path, ) diff --git a/examples/bracket/bracket.py b/examples/bracket/bracket.py index d1251d23e1..2907046424 100644 --- a/examples/bracket/bracket.py +++ b/examples/bracket/bracket.py @@ -329,9 +329,7 @@ def train(cfg: DictConfig): log_freq=cfg.log_freq, eval_during_train=cfg.TRAIN.eval_during_train, eval_freq=cfg.TRAIN.eval_freq, - seed=cfg.seed, equation=equation, - geom=geom, validator=validator, visualizer=visualizer, checkpoint_path=cfg.TRAIN.checkpoint_path, @@ -497,7 +495,6 @@ def evaluate(cfg: DictConfig): model, output_dir=cfg.output_dir, log_freq=cfg.log_freq, - seed=cfg.seed, validator=validator, visualizer=visualizer, pretrained_model_path=cfg.EVAL.pretrained_model_path, diff --git a/examples/bubble/bubble.py b/examples/bubble/bubble.py index b0b8e6c0f2..b3126e9739 100644 --- a/examples/bubble/bubble.py +++ b/examples/bubble/bubble.py @@ -186,7 +186,6 @@ def transform_out(in_, out): cfg.TRAIN.iters_per_epoch, eval_during_train=cfg.TRAIN.eval_during_train, eval_freq=cfg.TRAIN.eval_freq, - geom=geom, validator=validator, ) # train model @@ -341,7 +340,6 @@ def transform_out(in_, out): solver = ppsci.solver.Solver( model_list, output_dir=cfg.output_dir, - geom=geom, validator=validator, pretrained_model_path=cfg.EVAL.pretrained_model_path, ) diff --git a/examples/cfdgcn/cfdgcn.py b/examples/cfdgcn/cfdgcn.py index cdc46a32a4..b63278e4be 100644 --- a/examples/cfdgcn/cfdgcn.py +++ b/examples/cfdgcn/cfdgcn.py @@ -208,7 +208,6 @@ def evaluate(cfg: DictConfig): model, output_dir=cfg.output_dir, log_freq=cfg.log_freq, - seed=cfg.seed, validator=validator, pretrained_model_path=cfg.EVAL.pretrained_model_path, eval_with_no_grad=cfg.EVAL.eval_with_no_grad, diff --git a/examples/control_arm/forward_analysis.py b/examples/control_arm/forward_analysis.py index abdfbb09bf..267a7966e5 100644 --- a/examples/control_arm/forward_analysis.py +++ b/examples/control_arm/forward_analysis.py @@ -184,9 +184,7 @@ def train(cfg: DictConfig): lr_scheduler, cfg.TRAIN.epochs, cfg.TRAIN.iters_per_epoch, - seed=cfg.seed, equation=equation, - geom=geom, save_freq=cfg.TRAIN.save_freq, log_freq=cfg.log_freq, eval_freq=cfg.TRAIN.eval_freq, @@ -256,8 +254,6 @@ def evaluate(cfg: DictConfig): solver = ppsci.solver.Solver( model_list, output_dir=cfg.output_dir, - seed=cfg.seed, - geom=geom, log_freq=cfg.log_freq, eval_with_no_grad=cfg.EVAL.eval_with_no_grad, visualizer=visualizer, diff --git a/examples/control_arm/inverse_parameter.py b/examples/control_arm/inverse_parameter.py index bc112a1e25..3b9a984416 100644 --- a/examples/control_arm/inverse_parameter.py +++ 
b/examples/control_arm/inverse_parameter.py @@ -134,9 +134,7 @@ def train(cfg: DictConfig): lr_scheduler, cfg.TRAIN.epochs, cfg.TRAIN.iters_per_epoch, - seed=cfg.seed, equation=equation, - geom=geom, save_freq=cfg.TRAIN.save_freq, log_freq=cfg.log_freq, eval_freq=cfg.TRAIN.eval_freq, @@ -228,7 +226,6 @@ def evaluate(cfg: DictConfig): solver = ppsci.solver.Solver( model, output_dir=cfg.output_dir, - seed=cfg.seed, log_freq=cfg.log_freq, eval_with_no_grad=cfg.EVAL.eval_with_no_grad, validator=validator, diff --git a/examples/cylinder/2d_unsteady/cylinder2d_unsteady_Re100.py b/examples/cylinder/2d_unsteady/cylinder2d_unsteady_Re100.py index 45c05b4198..8e46442fe5 100644 --- a/examples/cylinder/2d_unsteady/cylinder2d_unsteady_Re100.py +++ b/examples/cylinder/2d_unsteady/cylinder2d_unsteady_Re100.py @@ -207,7 +207,6 @@ def train(cfg: DictConfig): eval_during_train=cfg.TRAIN.eval_during_train, eval_freq=cfg.TRAIN.eval_freq, equation=equation, - geom=geom, validator=validator, visualizer=visualizer, checkpoint_path=cfg.TRAIN.checkpoint_path, @@ -284,7 +283,6 @@ def evaluate(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - geom=geom, output_dir=cfg.output_dir, validator=validator, visualizer=visualizer, diff --git a/examples/darcy/darcy2d.py b/examples/darcy/darcy2d.py index e50cf01f55..c7d45441f1 100644 --- a/examples/darcy/darcy2d.py +++ b/examples/darcy/darcy2d.py @@ -151,7 +151,6 @@ def poisson_ref_compute_func(_in): eval_during_train=cfg.TRAIN.eval_during_train, eval_freq=cfg.TRAIN.eval_freq, equation=equation, - geom=geom, validator=validator, visualizer=visualizer, ) @@ -180,7 +179,6 @@ def poisson_ref_compute_func(_in): eval_during_train=cfg.TRAIN.lbfgs.eval_during_train, eval_freq=cfg.TRAIN.lbfgs.eval_freq, equation=equation, - geom=geom, validator=validator, visualizer=visualizer, ) @@ -274,7 +272,6 @@ def poisson_ref_compute_func(_in): model, output_dir=cfg.output_dir, equation=equation, - geom=geom, validator=validator, visualizer=visualizer, pretrained_model_path=cfg.EVAL.pretrained_model_path, diff --git a/examples/deepcfd/deepcfd.py b/examples/deepcfd/deepcfd.py index a11628e88e..414a753bed 100644 --- a/examples/deepcfd/deepcfd.py +++ b/examples/deepcfd/deepcfd.py @@ -311,7 +311,6 @@ def metric_expr( epochs=cfg.TRAIN.epochs, eval_during_train=cfg.TRAIN.eval_during_train, eval_freq=cfg.TRAIN.eval_freq, - seed=cfg.seed, validator=validator, checkpoint_path=cfg.TRAIN.checkpoint_path, eval_with_no_grad=cfg.EVAL.eval_with_no_grad, @@ -413,7 +412,6 @@ def metric_expr( solver = ppsci.solver.Solver( model, output_dir=cfg.output_dir, - seed=cfg.seed, validator=validator, pretrained_model_path=cfg.EVAL.pretrained_model_path, eval_with_no_grad=cfg.EVAL.eval_with_no_grad, diff --git a/examples/euler_beam/euler_beam.py b/examples/euler_beam/euler_beam.py index 48cf3ac63f..1ca12acc54 100644 --- a/examples/euler_beam/euler_beam.py +++ b/examples/euler_beam/euler_beam.py @@ -114,9 +114,7 @@ def u_solution_func(out): iters_per_epoch=cfg.TRAIN.iters_per_epoch, eval_during_train=cfg.TRAIN.eval_during_train, eval_freq=cfg.TRAIN.eval_freq, - seed=cfg.seed, equation=equation, - geom=geom, validator=validator, visualizer=visualizer, pretrained_model_path=cfg.TRAIN.pretrained_model_path, @@ -184,9 +182,7 @@ def u_solution_func(out): None, cfg.output_dir, None, - seed=cfg.seed, equation=equation, - geom=geom, validator=validator, visualizer=visualizer, pretrained_model_path=cfg.EVAL.pretrained_model_path, diff --git a/examples/fourcastnet/train_pretrain.py 
b/examples/fourcastnet/train_pretrain.py index bb24a2b602..b3be2d93c9 100644 --- a/examples/fourcastnet/train_pretrain.py +++ b/examples/fourcastnet/train_pretrain.py @@ -158,7 +158,6 @@ def train(cfg: DictConfig): cfg.TRAIN.epochs, ITERS_PER_EPOCH, eval_during_train=True, - seed=cfg.seed, validator=validator, compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, eval_with_no_grad=cfg.EVAL.eval_with_no_grad, @@ -231,7 +230,6 @@ def evaluate(cfg: DictConfig): model, output_dir=cfg.output_dir, log_freq=cfg.log_freq, - seed=cfg.seed, validator=validator, pretrained_model_path=cfg.EVAL.pretrained_model_path, compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, diff --git a/examples/fpde/fractional_poisson_2d.py b/examples/fpde/fractional_poisson_2d.py index 6acb518f5f..b95230ae3a 100644 --- a/examples/fpde/fractional_poisson_2d.py +++ b/examples/fpde/fractional_poisson_2d.py @@ -173,7 +173,6 @@ def input_data_fpde_transform( eval_during_train=True, eval_freq=EVAL_FREQ, equation=equation, - geom=geom, validator=validator, eval_with_no_grad=True, ) diff --git a/examples/fsi/viv.py b/examples/fsi/viv.py index 1b50f2d39a..d6b3fbf2ec 100644 --- a/examples/fsi/viv.py +++ b/examples/fsi/viv.py @@ -111,7 +111,6 @@ def train(cfg: DictConfig): log_freq=cfg.log_freq, eval_during_train=cfg.TRAIN.eval_during_train, eval_freq=cfg.TRAIN.eval_freq, - seed=cfg.seed, equation=equation, validator=validator, visualizer=visualizer, diff --git a/examples/gpinn/poisson_1d.py b/examples/gpinn/poisson_1d.py index b7436e77fa..840cc10d46 100644 --- a/examples/gpinn/poisson_1d.py +++ b/examples/gpinn/poisson_1d.py @@ -138,7 +138,6 @@ def u_solution(in_): eval_during_train=cfg.TRAIN.eval_during_train, eval_freq=cfg.TRAIN.eval_freq, equation=equation, - geom=geom, validator=validator, pretrained_model_path=cfg.TRAIN.pretrained_model_path, checkpoint_path=cfg.TRAIN.checkpoint_path, @@ -255,7 +254,6 @@ def u_solution(in_): solver = ppsci.solver.Solver( model, output_dir=cfg.output_dir, - geom=geom, validator=validator, pretrained_model_path=cfg.EVAL.pretrained_model_path, ) diff --git a/examples/heat_exchanger/heat_exchanger.py b/examples/heat_exchanger/heat_exchanger.py index e3859c01eb..c32d144fcd 100644 --- a/examples/heat_exchanger/heat_exchanger.py +++ b/examples/heat_exchanger/heat_exchanger.py @@ -338,7 +338,6 @@ def train(cfg: DictConfig): eval_during_train=cfg.TRAIN.eval_during_train, eval_freq=cfg.TRAIN.eval_freq, equation=equation, - geom=geom, validator=validator, ) # train model @@ -542,7 +541,6 @@ def evaluate(cfg: DictConfig): model, output_dir=cfg.output_dir, equation=equation, - geom=geom, validator=validator, pretrained_model_path=cfg.EVAL.pretrained_model_path, ) diff --git a/examples/heat_pinn/heat_pinn.py b/examples/heat_pinn/heat_pinn.py index d1400ca931..5cf81d1805 100644 --- a/examples/heat_pinn/heat_pinn.py +++ b/examples/heat_pinn/heat_pinn.py @@ -117,9 +117,7 @@ def train(cfg: DictConfig): iters_per_epoch=cfg.TRAIN.iters_per_epoch, save_freq=cfg.TRAIN.save_freq, log_freq=cfg.log_freq, - seed=cfg.seed, equation=equation, - geom=geom, pretrained_model_path=cfg.TRAIN.pretrained_model_path, checkpoint_path=cfg.TRAIN.checkpoint_path, ) @@ -215,7 +213,6 @@ def evaluate(cfg: DictConfig): model, output_dir=cfg.output_dir, log_freq=cfg.log_freq, - seed=cfg.seed, pretrained_model_path=cfg.EVAL.pretrained_model_path, ) # begin eval diff --git a/examples/hpinns/holography.py b/examples/hpinns/holography.py index e8e3d0dbd6..77bd3e7c79 100644 --- a/examples/hpinns/holography.py +++ 
b/examples/hpinns/holography.py @@ -391,7 +391,6 @@ def evaluate(cfg: DictConfig): solver = ppsci.solver.Solver( model_list, output_dir=cfg.output_dir, - seed=cfg.seed, validator=validator, pretrained_model_path=cfg.EVAL.pretrained_model_path, ) diff --git a/examples/ide/volterra_ide.py b/examples/ide/volterra_ide.py index 8289b1c7a3..70d106a2b9 100644 --- a/examples/ide/volterra_ide.py +++ b/examples/ide/volterra_ide.py @@ -164,7 +164,6 @@ def u_solution_func(in_): eval_during_train=cfg.TRAIN.eval_during_train, eval_freq=cfg.TRAIN.eval_freq, equation=equation, - geom=geom, validator=validator, pretrained_model_path=cfg.TRAIN.pretrained_model_path, checkpoint_path=cfg.TRAIN.checkpoint_path, @@ -219,7 +218,6 @@ def u_solution_func(in_) -> np.ndarray: solver = ppsci.solver.Solver( model, output_dir=cfg.output_dir, - geom=geom, validator=validator, pretrained_model_path=cfg.EVAL.pretrained_model_path, eval_with_no_grad=cfg.EVAL.eval_with_no_grad, diff --git a/examples/laplace/laplace2d.py b/examples/laplace/laplace2d.py index 39d6f959a0..106cbf78f6 100644 --- a/examples/laplace/laplace2d.py +++ b/examples/laplace/laplace2d.py @@ -113,7 +113,6 @@ def u_solution_func(out): eval_during_train=cfg.TRAIN.eval_during_train, eval_freq=cfg.TRAIN.eval_freq, equation=equation, - geom=geom, validator=validator, visualizer=visualizer, ) @@ -179,9 +178,7 @@ def u_solution_func(out): solver = ppsci.solver.Solver( model, output_dir=cfg.output_dir, - seed=cfg.seed, equation=equation, - geom=geom, validator=validator, visualizer=visualizer, pretrained_model_path=cfg.EVAL.pretrained_model_path, diff --git a/examples/ldc/ldc2d_steady_Re10.py b/examples/ldc/ldc2d_steady_Re10.py index e1ac1046b0..bbd139a35d 100644 --- a/examples/ldc/ldc2d_steady_Re10.py +++ b/examples/ldc/ldc2d_steady_Re10.py @@ -147,7 +147,6 @@ def train(cfg: DictConfig): eval_during_train=cfg.TRAIN.eval_during_train, eval_freq=cfg.TRAIN.eval_freq, equation=equation, - geom=geom, validator=validator, visualizer=visualizer, checkpoint_path=cfg.TRAIN.checkpoint_path, @@ -212,7 +211,6 @@ def evaluate(cfg: DictConfig): model, output_dir=cfg.output_dir, equation=equation, - geom=geom, validator=validator, visualizer=visualizer, pretrained_model_path=cfg.EVAL.pretrained_model_path, diff --git a/examples/ldc/ldc2d_unsteady_Re10.py b/examples/ldc/ldc2d_unsteady_Re10.py index 929d71f284..9a509674f1 100644 --- a/examples/ldc/ldc2d_unsteady_Re10.py +++ b/examples/ldc/ldc2d_unsteady_Re10.py @@ -190,7 +190,6 @@ def train(cfg: DictConfig): eval_during_train=cfg.TRAIN.eval_during_train, eval_freq=cfg.TRAIN.eval_freq, equation=equation, - geom=geom, validator=validator, visualizer=visualizer, ) @@ -286,7 +285,6 @@ def evaluate(cfg: DictConfig): model, output_dir=cfg.output_dir, equation=equation, - geom=geom, validator=validator, visualizer=visualizer, pretrained_model_path=cfg.EVAL.pretrained_model_path, diff --git a/examples/nsfnet/VP_NSFNet1.py b/examples/nsfnet/VP_NSFNet1.py index 7b06b69cd8..3edefa7c9c 100644 --- a/examples/nsfnet/VP_NSFNet1.py +++ b/examples/nsfnet/VP_NSFNet1.py @@ -194,7 +194,6 @@ def train(cfg: DictConfig): eval_freq=cfg.eval_freq, seed=SEED, equation=equation, - geom=geom, validator=validator, visualizer=None, eval_with_no_grad=False, @@ -229,7 +228,6 @@ def train(cfg: DictConfig): eval_freq=2000, seed=SEED, equation=equation, - geom=geom, validator=validator, visualizer=None, eval_with_no_grad=False, @@ -243,25 +241,15 @@ def train(cfg: DictConfig): def evaluate(cfg: DictConfig): - # set random seed for reproducibility - SEED = 
cfg.seed - # set model model = ppsci.arch.MLP(**cfg.MODEL) ppsci.utils.load_pretrain(model, cfg.pretrained_model_path) - # set the number of residual samples - N_TRAIN = cfg.ntrain - # set the Reynolds number and the corresponding lambda which is the parameter in the exact solution. Re = cfg.re lam = 0.5 * Re - np.sqrt(0.25 * (Re**2) + 4 * (np.pi**2)) - x_train = (np.random.rand(N_TRAIN, 1) - 1 / 3) * 3 / 2 - y_train = (np.random.rand(N_TRAIN, 1) - 1 / 4) * 2 - # generate test data - np.random.seed(SEED) x_star = ((np.random.rand(1000, 1) - 1 / 3) * 3 / 2).astype("float32") y_star = ((np.random.rand(1000, 1) - 1 / 4) * 2).astype("float32") u_star = 1 - np.exp(lam * x_star) * np.cos(2 * np.pi * y_star) @@ -277,9 +265,6 @@ def evaluate(cfg: DictConfig): "total_size": u_star.shape[0], "batch_size": u_star.shape[0], } - - geom = ppsci.geometry.PointCloud({"x": x_train, "y": y_train}, ("x", "y")) - # set equation constarint s.t. ||F(u)|| equation = { "NavierStokes": ppsci.equation.NavierStokes( @@ -306,7 +291,6 @@ def evaluate(cfg: DictConfig): solver = ppsci.solver.Solver( model, equation=equation, - geom=geom, validator=validator, ) diff --git a/examples/nsfnet/VP_NSFNet2.py b/examples/nsfnet/VP_NSFNet2.py index 05a204d983..8bd5d5aed6 100644 --- a/examples/nsfnet/VP_NSFNet2.py +++ b/examples/nsfnet/VP_NSFNet2.py @@ -267,7 +267,6 @@ def train(cfg: DictConfig): eval_freq=cfg.eval_freq, seed=SEED, equation=equation, - geom=geom, validator=validator, visualizer=None, eval_with_no_grad=False, @@ -288,9 +287,6 @@ def evaluate(cfg: DictConfig): model = ppsci.arch.MLP(**cfg.MODEL) ppsci.utils.load_pretrain(model, cfg.pretrained_model_path) - # set the number of residual samples - N_TRAIN = cfg.ntrain - data = scipy.io.loadmat(cfg.data_dir) U_star = data["U_star"].astype("float32") # N x 2 x T @@ -299,38 +295,10 @@ def evaluate(cfg: DictConfig): X_star = data["X_star"].astype("float32") # N x 2 N = X_star.shape[0] - T = t_star.shape[0] # rearrange data - XX = np.tile(X_star[:, 0:1], (1, T)) # N x T - YY = np.tile(X_star[:, 1:2], (1, T)) # N x T TT = np.tile(t_star, (1, N)).T # N x T - UU = U_star[:, 0, :] # N x T - VV = U_star[:, 1, :] # N x T - PP = P_star # N x T - - x = XX.flatten()[:, None] # NT x 1 - y = YY.flatten()[:, None] # NT x 1 - t = TT.flatten()[:, None] # NT x 1 - - u = UU.flatten()[:, None] # NT x 1 - v = VV.flatten()[:, None] # NT x 1 - p = PP.flatten()[:, None] # NT x 1 - - data1 = np.concatenate([x, y, t, u, v, p], 1) - data2 = data1[:, :][data1[:, 2] <= 7] - data3 = data2[:, :][data2[:, 0] >= 1] - data4 = data3[:, :][data3[:, 0] <= 8] - data5 = data4[:, :][data4[:, 1] >= -2] - data_domain = data5[:, :][data5[:, 1] <= 2] - - idx = np.random.choice(data_domain.shape[0], N_TRAIN, replace=False) - - x_train = data_domain[idx, 0].reshape(data_domain[idx, 0].shape[0], 1) - y_train = data_domain[idx, 1].reshape(data_domain[idx, 1].shape[0], 1) - t_train = data_domain[idx, 2].reshape(data_domain[idx, 2].shape[0], 1) - snap = np.array([0]) x_star = X_star[:, 0:1] y_star = X_star[:, 1:2] @@ -349,11 +317,6 @@ def evaluate(cfg: DictConfig): "total_size": u_star.shape[0], "batch_size": u_star.shape[0], } - - geom = ppsci.geometry.PointCloud( - {"x": x_train, "y": y_train, "t": t_train}, ("x", "y", "t") - ) - # set equation constarint s.t. 
||F(u)|| equation = { "NavierStokes": ppsci.equation.NavierStokes(nu=0.01, rho=1.0, dim=2, time=True), @@ -377,7 +340,6 @@ def evaluate(cfg: DictConfig): solver = ppsci.solver.Solver( model, equation=equation, - geom=geom, validator=validator, ) diff --git a/examples/nsfnet/VP_NSFNet3.py b/examples/nsfnet/VP_NSFNet3.py index d790c49123..bc6a991174 100644 --- a/examples/nsfnet/VP_NSFNet3.py +++ b/examples/nsfnet/VP_NSFNet3.py @@ -325,9 +325,7 @@ def train(cfg: DictConfig): eval_during_train=True, log_freq=cfg.log_freq, eval_freq=cfg.eval_freq, - seed=SEED, equation=equation, - geom=geom, validator=validator, visualizer=None, eval_with_no_grad=False, @@ -343,28 +341,10 @@ def train(cfg: DictConfig): def evaluate(cfg: DictConfig): OUTPUT_DIR = cfg.output_dir - # set random seed for reproducibility - SEED = cfg.seed - ppsci.utils.misc.set_random_seed(SEED) - # set model model = ppsci.arch.MLP(**cfg.MODEL) ppsci.utils.load_pretrain(model, cfg.pretrained_model_path) - # set the number of residual samples - N_TRAIN = cfg.ntrain - - # unsupervised part - xx = np.random.randint(31, size=N_TRAIN) / 15 - 1 - yy = np.random.randint(31, size=N_TRAIN) / 15 - 1 - zz = np.random.randint(31, size=N_TRAIN) / 15 - 1 - tt = np.random.randint(11, size=N_TRAIN) / 10 - - x_train = xx.reshape(xx.shape[0], 1).astype("float32") - y_train = yy.reshape(yy.shape[0], 1).astype("float32") - z_train = zz.reshape(zz.shape[0], 1).astype("float32") - t_train = tt.reshape(tt.shape[0], 1).astype("float32") - # test data x_star = ((np.random.rand(1000, 1) - 1 / 2) * 2).astype("float32") y_star = ((np.random.rand(1000, 1) - 1 / 2) * 2).astype("float32") @@ -384,10 +364,6 @@ def evaluate(cfg: DictConfig): "total_size": u_star.shape[0], "batch_size": u_star.shape[0], } - geom = ppsci.geometry.PointCloud( - {"x": x_train, "y": y_train, "z": z_train, "t": t_train}, ("x", "y", "z", "t") - ) - equation = { "NavierStokes": ppsci.equation.NavierStokes( nu=1.0 / cfg.re, rho=1.0, dim=3, time=True @@ -412,7 +388,6 @@ def evaluate(cfg: DictConfig): solver = ppsci.solver.Solver( model, equation=equation, - geom=geom, validator=validator, ) diff --git a/examples/nsfnet/VP_NSFNet4.py b/examples/nsfnet/VP_NSFNet4.py index f60f766074..4ee56ce574 100644 --- a/examples/nsfnet/VP_NSFNet4.py +++ b/examples/nsfnet/VP_NSFNet4.py @@ -294,9 +294,7 @@ def train(cfg: DictConfig): save_freq=cfg.TRAIN.save_freq, eval_freq=cfg.TRAIN.eval_freq, eval_during_train=True, - seed=cfg.seed, equation=equation, - geom=geom, validator=validator, eval_with_no_grad=cfg.TRAIN.eval_with_no_grad, ) diff --git a/examples/operator_learning/deeponet.py b/examples/operator_learning/deeponet.py index 51328492ab..79ffa56745 100644 --- a/examples/operator_learning/deeponet.py +++ b/examples/operator_learning/deeponet.py @@ -72,7 +72,6 @@ def train(cfg: DictConfig): save_freq=cfg.TRAIN.save_freq, eval_freq=cfg.TRAIN.eval_freq, log_freq=cfg.log_freq, - seed=cfg.seed, validator=validator, eval_during_train=cfg.TRAIN.eval_during_train, checkpoint_path=cfg.TRAIN.checkpoint_path, diff --git a/examples/phylstm/phylstm2.py b/examples/phylstm/phylstm2.py index aecb4e0b93..8bf184292a 100755 --- a/examples/phylstm/phylstm2.py +++ b/examples/phylstm/phylstm2.py @@ -173,7 +173,6 @@ def train(cfg: DictConfig): cfg.TRAIN.iters_per_epoch, save_freq=cfg.TRAIN.save_freq, log_freq=cfg.log_freq, - seed=cfg.seed, validator=validator_pde, checkpoint_path=cfg.TRAIN.checkpoint_path, eval_with_no_grad=cfg.EVAL.eval_with_no_grad, @@ -296,7 +295,6 @@ def evaluate(cfg: DictConfig): solver = 
ppsci.solver.Solver( model, output_dir=cfg.output_dir, - seed=cfg.seed, validator=validator_pde, pretrained_model_path=cfg.EVAL.pretrained_model_path, eval_with_no_grad=cfg.EVAL.eval_with_no_grad, diff --git a/examples/phylstm/phylstm3.py b/examples/phylstm/phylstm3.py index 6e5c4130be..c8f424168e 100755 --- a/examples/phylstm/phylstm3.py +++ b/examples/phylstm/phylstm3.py @@ -178,7 +178,6 @@ def train(cfg: DictConfig): cfg.TRAIN.iters_per_epoch, save_freq=cfg.TRAIN.save_freq, log_freq=cfg.log_freq, - seed=cfg.seed, validator=validator_pde, checkpoint_path=cfg.TRAIN.checkpoint_path, eval_with_no_grad=cfg.EVAL.eval_with_no_grad, @@ -309,7 +308,6 @@ def evaluate(cfg: DictConfig): solver = ppsci.solver.Solver( model, output_dir=cfg.output_dir, - seed=cfg.seed, validator=validator_pde, pretrained_model_path=cfg.EVAL.pretrained_model_path, eval_with_no_grad=cfg.EVAL.eval_with_no_grad, diff --git a/examples/shock_wave/shock_wave.py b/examples/shock_wave/shock_wave.py index 8523878768..13b4920d46 100644 --- a/examples/shock_wave/shock_wave.py +++ b/examples/shock_wave/shock_wave.py @@ -403,7 +403,6 @@ def train(cfg: DictConfig): cfg.TRAIN.iters_per_epoch, save_freq=cfg.TRAIN.save_freq, log_freq=cfg.log_freq, - seed=cfg.seed, equation=equation, pretrained_model_path=cfg.TRAIN.pretrained_model_path, checkpoint_path=cfg.TRAIN.checkpoint_path, @@ -426,7 +425,6 @@ def evaluate(cfg: DictConfig): solver = ppsci.solver.Solver( model, output_dir=cfg.output_dir, - seed=cfg.seed, eval_with_no_grad=cfg.EVAL.eval_with_no_grad, pretrained_model_path=cfg.EVAL.pretrained_model_path, ) diff --git a/ppsci/solver/solver.py b/ppsci/solver/solver.py index 2fea206c6c..6f6e93abdf 100644 --- a/ppsci/solver/solver.py +++ b/ppsci/solver/solver.py @@ -975,3 +975,51 @@ def plot_loss_history( smooth_step=smooth_step, use_semilogy=use_semilogy, ) + + # def _parse_params_from_cfg(self, cfg: DictConfig): + # """Parse hyper-parameters from DictConfig.""" + # # general parameters + # ## output directory + # self.output_dir = cfg.output_dir + # ## logging frequency + # self.log_freq = cfg.log_freq + + # # training related parameters + # self.epochs = cfg.TRAIN.epochs + # self.iters_per_epoch = cfg.TRAIN.iters_per_epoch + # ## set update_freq for gradient accumulation + # self.update_freq = cfg.TRAIN.update_freq + # ## set checkpoint saving frequency + # self.save_freq = cfg.TRAIN.save_freq + # self.eval_during_train = cfg.TRAIN.eval_during_train + # self.start_eval_epoch = cfg.TRAIN.start_eval_epoch + # self.eval_freq = cfg.TRAIN.eval_freq + + # # evaluating related parameters + # self.device = cfg.device + + # # set automatic mixed precision (AMP) configuration + # self.use_amp = cfg.use_amp + # self.amp_level = cfg.amp_level + + # # whether calculate metrics by each batch during evaluation, mainly for memory efficiency + # self.compute_metric_by_batch = cfg.EVAL.compute_metric_by_batch + # # whether set `stop_gradient=True` for every Tensor if no differentiation involved during evaluation + # self.eval_with_no_grad = cfg.EVAL.eval_with_no_grad + + # # set moving average model (optional) + # if cfg and any(key in cfg.TRAIN for key in ["ema", "swa"]): + # if "ema" in cfg.TRAIN: + # self.avg_freq = cfg.TRAIN.ema.avg_freq + # elif "swa" in cfg.TRAIN: + # self.avg_freq = cfg.TRAIN.swa.avg_freq + + # # load pretrained model, usually used for transfer learning + # self.pretrained_model_path = cfg.TRAIN.pretrained_model_path + + # # set up benchmark flag, will print memory stat if enabled + # self.benchmark_flag: bool = 
os.getenv("BENCHMARK_ROOT", None) is not None + + # # set up nvtx flag for nsight analysis + # self.nvtx_flag: bool = os.getenv("NVTX", None) is not None + # self.forward_helper.nvtx_flag = self.nvtx_flag From 048649312b0655d5e62b1834fa6f84bcde3aa277 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Tue, 30 Apr 2024 03:23:33 +0000 Subject: [PATCH 03/20] support default config node --- examples/NLS-MB/NLS-MB_optical_rogue_wave.py | 23 +-- examples/NLS-MB/NLS-MB_optical_soliton.py | 18 +- examples/NLS-MB/conf/NLS-MB_rogue_wave.yaml | 9 + examples/NLS-MB/conf/NLS-MB_soliton.yaml | 9 + examples/RegAE/RegAE.py | 16 +- examples/RegAE/conf/RegAE.yaml | 9 + examples/advection/conf/adv_plain.yaml | 96 +++++++++ examples/allen_cahn/allen_cahn_causal.py | 21 +- examples/allen_cahn/allen_cahn_plain.py | 21 +- .../allen_cahn/conf/allen_cahn_causal.yaml | 96 +++++++++ .../conf/allen_cahn_causal_fourier.yaml | 99 ++++++++++ .../conf/allen_cahn_causal_fourier_rwf.yaml | 9 + .../allen_cahn/conf/allen_cahn_fourier.yaml | 96 +++++++++ examples/amgnet/amgnet_airfoil.py | 16 +- examples/amgnet/amgnet_cylinder.py | 16 +- examples/amgnet/conf/amgnet_airfoil.yaml | 9 + examples/amgnet/conf/amgnet_cylinder.yaml | 9 + examples/aneurysm/aneurysm.py | 21 +- examples/aneurysm/aneurysm_flow.py | 15 +- examples/aneurysm/conf/aneurysm.yaml | 9 + examples/aneurysm/conf/aneurysm_flow.yaml | 9 + examples/biharmonic2d/biharmonic2d.py | 23 +-- examples/biharmonic2d/conf/biharmonic2d.yaml | 9 + examples/bracket/bracket.py | 18 +- examples/bracket/conf/bracket.yaml | 9 + examples/bubble/bubble.py | 12 +- examples/bubble/conf/bubble.yaml | 9 + examples/cfdgcn/cfdgcn.py | 17 +- examples/cfdgcn/conf/cfdgcn.yaml | 9 + examples/chip_heat/chip_heat.py | 12 +- examples/chip_heat/conf/chip_heat.yaml | 9 + .../control_arm/conf/forward_analysis.yaml | 9 + .../control_arm/conf/inverse_parameter.yaml | 9 + examples/control_arm/forward_analysis.py | 18 +- examples/control_arm/inverse_parameter.py | 18 +- .../convection_diffusion/conf/5_best.yaml | 90 +++++++++ examples/convection_diffusion/conf/6.yaml | 89 +++++++++ examples/darcy/conf/darcy2d.yaml | 9 + examples/darcy/darcy2d.py | 18 +- examples/deepcfd/conf/deepcfd.yaml | 9 + examples/deepcfd/deepcfd.py | 13 +- examples/deephpms/burgers.py | 24 +-- examples/deephpms/conf/burgers.yaml | 9 + examples/deephpms/conf/korteweg_de_vries.yaml | 9 + .../deephpms/conf/kuramoto_sivashinsky.yaml | 9 + examples/deephpms/conf/navier_stokes.yaml | 9 + examples/deephpms/conf/schrodinger.yaml | 9 + examples/deephpms/korteweg_de_vries.py | 22 +-- examples/deephpms/kuramoto_sivashinsky.py | 24 +-- examples/deephpms/navier_stokes.py | 24 +-- examples/deephpms/schrodinger.py | 24 +-- examples/dgmr/conf/dgmr.yaml | 9 + examples/dgmr/dgmr.py | 2 +- examples/epnn/conf/epnn.yaml | 9 + examples/epnn/epnn.py | 14 +- examples/euler_beam/conf/euler_beam.yaml | 9 + examples/euler_beam/euler_beam.py | 21 +- .../conf/fourcastnet_finetune.yaml | 9 + .../fourcastnet/conf/fourcastnet_precip.yaml | 9 + .../conf/fourcastnet_pretrain.yaml | 9 + examples/fourcastnet/train_finetune.py | 16 +- examples/fourcastnet/train_precip.py | 15 +- examples/fourcastnet/train_pretrain.py | 16 +- examples/fpde/fractional_poisson_2d.py | 180 +++++++++++------ examples/fsi/conf/viv.yaml | 10 +- examples/fsi/viv.py | 19 +- examples/gpinn/conf/poisson_1d.yaml | 9 + examples/gpinn/poisson_1d.py | 14 +- .../heat_exchanger/conf/heat_exchanger.yaml | 9 + examples/heat_exchanger/heat_exchanger.py | 12 +- 
examples/heat_pinn/conf/heat_pinn.yaml | 9 + examples/heat_pinn/heat_pinn.py | 14 +- examples/hpinns/conf/hpinns.yaml | 9 + examples/hpinns/holography.py | 30 +-- examples/ide/conf/volterra_ide.yaml | 9 + examples/ide/volterra_ide.py | 17 +- examples/laplace/conf/laplace2d.yaml | 9 + examples/laplace/laplace2d.py | 13 +- examples/ldc/conf/ldc2d_modulus.yaml | 69 +++++++ .../ldc2d_modulus_importance_sampling.yaml | 69 +++++++ examples/ldc/conf/ldc2d_steady_Re10.yaml | 9 + examples/ldc/conf/ldc2d_unsteady_Re10.yaml | 9 + examples/ldc/ldc2d_steady_Re10.py | 13 +- examples/ldc/ldc2d_unsteady_Re10.py | 12 +- examples/lorenz/conf/enn.yaml | 9 + examples/lorenz/conf/transformer.yaml | 9 + examples/lorenz/train_enn.py | 11 +- examples/lorenz/train_transformer.py | 14 +- examples/nowcastnet/conf/nowcastnet.yaml | 9 + examples/nowcastnet/nowcastnet.py | 2 +- examples/nsfnet/VP_NSFNet1.py | 24 +-- examples/nsfnet/VP_NSFNet2.py | 19 +- examples/nsfnet/VP_NSFNet3.py | 14 +- examples/nsfnet/VP_NSFNet4.py | 17 +- examples/nsfnet/conf/VP_NSFNet4.yaml | 9 + examples/operator_learning/conf/deeponet.yaml | 9 + examples/operator_learning/deeponet.py | 17 +- examples/phycrnet/conf/burgers_equations.yaml | 9 + .../conf/fitzhugh_nagumo_RD_equation.yaml | 9 + .../conf/lambda_omega_RD_equation.yaml | 9 + examples/phycrnet/main.py | 9 +- examples/phygeonet/conf/heat_equation.yaml | 9 + .../phygeonet/conf/heat_equation_with_bc.yaml | 9 + examples/phygeonet/heat_equation.py | 8 +- examples/phygeonet/heat_equation_with_bc.py | 8 +- examples/phylstm/conf/phylstm2.yaml | 9 + examples/phylstm/conf/phylstm3.yaml | 9 + examples/phylstm/phylstm2.py | 15 +- examples/phylstm/phylstm3.py | 15 +- examples/pipe/conf/poiseuille_flow.yaml | 9 + examples/pipe/poiseuille_flow.py | 12 +- examples/rossler/conf/enn.yaml | 9 + examples/rossler/conf/transformer.yaml | 9 + examples/rossler/train_enn.py | 11 +- examples/rossler/train_transformer.py | 14 +- .../shock_wave/conf/shock_wave_Ma0.728.yaml | 9 + .../shock_wave/conf/shock_wave_Ma2.0.yaml | 9 + examples/shock_wave/shock_wave.py | 16 +- examples/tempoGAN/conf/tempogan.yaml | 9 + examples/tempoGAN/tempoGAN.py | 24 +-- examples/topopt/topopt.py | 6 +- examples/yinglong1/conf/yinglong_12.yaml | 9 + examples/yinglong1/conf/yinglong_24.yaml | 9 + ppsci/solver/solver.py | 187 +++++++++--------- ppsci/utils/config.py | 181 ++++++++++------- 125 files changed, 1755 insertions(+), 929 deletions(-) create mode 100644 examples/advection/conf/adv_plain.yaml create mode 100644 examples/allen_cahn/conf/allen_cahn_causal.yaml create mode 100644 examples/allen_cahn/conf/allen_cahn_causal_fourier.yaml create mode 100644 examples/allen_cahn/conf/allen_cahn_fourier.yaml create mode 100644 examples/convection_diffusion/conf/5_best.yaml create mode 100644 examples/convection_diffusion/conf/6.yaml create mode 100644 examples/ldc/conf/ldc2d_modulus.yaml create mode 100644 examples/ldc/conf/ldc2d_modulus_importance_sampling.yaml diff --git a/examples/NLS-MB/NLS-MB_optical_rogue_wave.py b/examples/NLS-MB/NLS-MB_optical_rogue_wave.py index f1d7c52557..1e486fa7cb 100644 --- a/examples/NLS-MB/NLS-MB_optical_rogue_wave.py +++ b/examples/NLS-MB/NLS-MB_optical_rogue_wave.py @@ -233,14 +233,10 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - epochs=cfg.TRAIN.epochs, - iters_per_epoch=cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, + optimizer=optimizer, equation=equation, 
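+ # NOTE: epochs, iters_per_epoch, output_dir and the eval/save/checkpoint + # settings are no longer threaded through one by one; Solver parses them + # from the `cfg` node passed below (cf. _parse_params_from_cfg in ppsci/solver/solver.py)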
validator=validator, + cfg=cfg, ) # train model solver.train() @@ -258,14 +254,11 @@ def train(cfg: DictConfig): model, constraint, OUTPUT_DIR, - optimizer_lbfgs, - None, - EPOCHS, - cfg.TRAIN.lbfgs.iters_per_epoch, - eval_during_train=cfg.TRAIN.lbfgs.eval_during_train, - eval_freq=cfg.TRAIN.lbfgs.eval_freq, + optimizer=optimizer_lbfgs, + epochs=EPOCHS, equation=equation, validator=validator, + cfg=cfg, ) # train model solver.train() @@ -339,11 +332,9 @@ def evaluate(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, - eval_freq=cfg.TRAIN.eval_freq, equation=equation, validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) solver.eval() @@ -371,7 +362,7 @@ def export(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - pretrained_model_path=cfg.INFER.pretrained_model_path, + cfg=cfg, ) # export model from paddle.static import InputSpec diff --git a/examples/NLS-MB/NLS-MB_optical_soliton.py b/examples/NLS-MB/NLS-MB_optical_soliton.py index dd5fe9a17b..5dbf431c15 100644 --- a/examples/NLS-MB/NLS-MB_optical_soliton.py +++ b/examples/NLS-MB/NLS-MB_optical_soliton.py @@ -210,14 +210,10 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - epochs=cfg.TRAIN.epochs, - iters_per_epoch=cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, + optimizer=optimizer, equation=equation, validator=validator, + cfg=cfg, ) # train model solver.train() @@ -236,13 +232,11 @@ def train(cfg: DictConfig): constraint, OUTPUT_DIR, optimizer_lbfgs, - None, EPOCHS, cfg.TRAIN.lbfgs.iters_per_epoch, - eval_during_train=cfg.TRAIN.lbfgs.eval_during_train, - eval_freq=cfg.TRAIN.lbfgs.eval_freq, equation=equation, validator=validator, + cfg=cfg, ) # train model solver.train() @@ -316,11 +310,9 @@ def evaluate(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, - eval_freq=cfg.TRAIN.eval_freq, equation=equation, validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) solver.eval() @@ -348,7 +340,7 @@ def export(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - pretrained_model_path=cfg.INFER.pretrained_model_path, + cfg=cfg, ) # export model from paddle.static import InputSpec diff --git a/examples/NLS-MB/conf/NLS-MB_rogue_wave.yaml b/examples/NLS-MB/conf/NLS-MB_rogue_wave.yaml index 3e1c2122e2..04388374f9 100644 --- a/examples/NLS-MB/conf/NLS-MB_rogue_wave.yaml +++ b/examples/NLS-MB/conf/NLS-MB_rogue_wave.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/NLS-MB/conf/NLS-MB_soliton.yaml b/examples/NLS-MB/conf/NLS-MB_soliton.yaml index 7d5f9ef8db..f0a282e593 100644 --- a/examples/NLS-MB/conf/NLS-MB_soliton.yaml +++ b/examples/NLS-MB/conf/NLS-MB_soliton.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/RegAE/RegAE.py b/examples/RegAE/RegAE.py index c662933ad5..f14a4425ca 100644 --- a/examples/RegAE/RegAE.py +++ 
b/examples/RegAE/RegAE.py @@ -88,16 +88,9 @@ def loss_expr(output_dict, label_dict, weight_dict=None): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, + optimizer=optimizer, validator=validator, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # train model solver.train() @@ -135,11 +128,8 @@ def evaluate(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - None, - output_dir=cfg.output_dir, validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # evaluate after finished training solver.eval() diff --git a/examples/RegAE/conf/RegAE.yaml b/examples/RegAE/conf/RegAE.yaml index 433b05eea7..b182152684 100644 --- a/examples/RegAE/conf/RegAE.yaml +++ b/examples/RegAE/conf/RegAE.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/advection/conf/adv_plain.yaml b/examples/advection/conf/adv_plain.yaml new file mode 100644 index 0000000000..50e2dc64a6 --- /dev/null +++ b/examples/advection/conf/adv_plain.yaml @@ -0,0 +1,96 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_advection_plain/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working direcotry unchaned + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - INFER.pretrained_model_path + - mode + - output_dir + - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 100 + +DATA_PATH: ./dataset/allen_cahn.mat + +# model settings +MODEL: + input_keys: [t, x] + output_keys: [u] + num_layers: 4 + hidden_size: 256 + activation: tanh + periods: + t: [2*np.pi, False] + +# training settings +TRAIN: + epochs: 200 + iters_per_epoch: 1000 + save_freq: 10 + eval_during_train: true + eval_freq: 1 + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 1.0e-3 + gamma: 0.9 + decay_steps: 2000 + by_epoch: false + batch_size: 4096 + pretrained_model_path: null + checkpoint_path: null + ema: + decay: 0.9 + avg_freq: 1 + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + batch_size: 4096 + +# inference settings +INFER: + pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/AllenCahn/allen_cahn_plain_pretrained.pdparams + export_path: ./inference/allen_cahn + pdmodel_path: ${INFER.export_path}.pdmodel + pdpiparams_path: ${INFER.export_path}.pdiparams + onnx_path: ${INFER.export_path}.onnx + device: gpu + engine: native + precision: fp32 + ir_optim: true + min_subgraph_size: 5 + gpu_mem: 2000 + 
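# the keys in this INFER block configure exported-model deployment: `engine: + # native` selects the plain Paddle Inference runtime, and the device, memory + # and batch settings below mirror the other example configs +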
gpu_id: 0 + max_batch_size: 1024 + num_cpu_threads: 10 + batch_size: 1024 diff --git a/examples/allen_cahn/allen_cahn_causal.py b/examples/allen_cahn/allen_cahn_causal.py index 4dfb773d0a..3a25de58ac 100644 --- a/examples/allen_cahn/allen_cahn_causal.py +++ b/examples/allen_cahn/allen_cahn_causal.py @@ -158,21 +158,9 @@ def gen_label_batch(input_batch): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - log_freq=cfg.log_freq, - eval_during_train=True, - eval_freq=cfg.TRAIN.eval_freq, + optimizer=optimizer, equation=equation, validator=validator, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, - checkpoint_path=cfg.TRAIN.checkpoint_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - use_tbd=True, cfg=cfg, ) # train model @@ -221,11 +209,8 @@ def evaluate(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, - log_freq=cfg.log_freq, validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # evaluate after finished training @@ -247,7 +232,7 @@ def export(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - pretrained_model_path=cfg.INFER.pretrained_model_path, + cfg=cfg, ) # export model from paddle.static import InputSpec diff --git a/examples/allen_cahn/allen_cahn_plain.py b/examples/allen_cahn/allen_cahn_plain.py index ed9d6070a7..badb695c2a 100644 --- a/examples/allen_cahn/allen_cahn_plain.py +++ b/examples/allen_cahn/allen_cahn_plain.py @@ -156,21 +156,9 @@ def gen_label_batch(input_batch): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - log_freq=cfg.log_freq, - eval_during_train=True, - eval_freq=cfg.TRAIN.eval_freq, + optimizer=optimizer, equation=equation, validator=validator, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, - checkpoint_path=cfg.TRAIN.checkpoint_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - use_tbd=True, cfg=cfg, ) # train model @@ -219,11 +207,8 @@ def evaluate(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, - log_freq=cfg.log_freq, validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # evaluate after finished training @@ -245,7 +230,7 @@ def export(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - pretrained_model_path=cfg.INFER.pretrained_model_path, + cfg=cfg, ) # export model from paddle.static import InputSpec diff --git a/examples/allen_cahn/conf/allen_cahn_causal.yaml b/examples/allen_cahn/conf/allen_cahn_causal.yaml new file mode 100644 index 0000000000..41d070d403 --- /dev/null +++ b/examples/allen_cahn/conf/allen_cahn_causal.yaml @@ -0,0 +1,96 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_allen_cahn_causal/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working direcotry unchaned + config: + override_dirname: + exclude_keys: + - 
TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - INFER.pretrained_model_path + - mode + - output_dir + - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 100 + +DATA_PATH: ./dataset/allen_cahn.mat + +# model settings +MODEL: + input_keys: [t, x] + output_keys: [u] + num_layers: 4 + hidden_size: 256 + activation: tanh + periods: + x: [2.0, False] + +# training settings +TRAIN: + epochs: 200 + iters_per_epoch: 1000 + save_freq: 10 + eval_during_train: true + eval_freq: 1 + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 1.0e-3 + gamma: 0.9 + decay_steps: 2000 + by_epoch: false + batch_size: 4096 + pretrained_model_path: null + checkpoint_path: null + causal: + n_chunks: 32 + tol: 1.0 + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + batch_size: 4096 + +# inference settings +INFER: + pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/AllenCahn/allen_cahn_plain_pretrained.pdparams + export_path: ./inference/allen_cahn + pdmodel_path: ${INFER.export_path}.pdmodel + pdpiparams_path: ${INFER.export_path}.pdiparams + onnx_path: ${INFER.export_path}.onnx + device: gpu + engine: native + precision: fp32 + ir_optim: true + min_subgraph_size: 5 + gpu_mem: 2000 + gpu_id: 0 + max_batch_size: 1024 + num_cpu_threads: 10 + batch_size: 1024 diff --git a/examples/allen_cahn/conf/allen_cahn_causal_fourier.yaml b/examples/allen_cahn/conf/allen_cahn_causal_fourier.yaml new file mode 100644 index 0000000000..6a3ad83ded --- /dev/null +++ b/examples/allen_cahn/conf/allen_cahn_causal_fourier.yaml @@ -0,0 +1,99 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_allen_cahn_causal_fourier/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working direcotry unchaned + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - INFER.pretrained_model_path + - mode + - output_dir + - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 100 + +DATA_PATH: ./dataset/allen_cahn.mat + +# model settings +MODEL: + input_keys: [t, x] + output_keys: [u] + num_layers: 4 + hidden_size: 256 + activation: tanh + periods: + x: [2.0, False] + fourier: + dim: 256 + scale: 1.0 + +# training settings +TRAIN: + epochs: 200 + iters_per_epoch: 1000 + save_freq: 10 + eval_during_train: true + eval_freq: 1 + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 1.0e-3 + gamma: 0.9 + decay_steps: 2000 + by_epoch: false + batch_size: 4096 + pretrained_model_path: null + checkpoint_path: null + # causal: + # n_chunks: 16 + # tol: 1.0 + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + 
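# evaluation batch size below; like any key in this file it can be overridden + # from the command line, e.g. `python allen_cahn_causal_fourier.py mode=eval + # EVAL.batch_size=2048` (script name and value are illustrative) +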
batch_size: 4096 + +# inference settings +INFER: + pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/AllenCahn/allen_cahn_plain_pretrained.pdparams + export_path: ./inference/allen_cahn + pdmodel_path: ${INFER.export_path}.pdmodel + pdpiparams_path: ${INFER.export_path}.pdiparams + onnx_path: ${INFER.export_path}.onnx + device: gpu + engine: native + precision: fp32 + ir_optim: true + min_subgraph_size: 5 + gpu_mem: 2000 + gpu_id: 0 + max_batch_size: 1024 + num_cpu_threads: 10 + batch_size: 1024 diff --git a/examples/allen_cahn/conf/allen_cahn_causal_fourier_rwf.yaml b/examples/allen_cahn/conf/allen_cahn_causal_fourier_rwf.yaml index f664532b4f..98637d33f6 100644 --- a/examples/allen_cahn/conf/allen_cahn_causal_fourier_rwf.yaml +++ b/examples/allen_cahn/conf/allen_cahn_causal_fourier_rwf.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/allen_cahn/conf/allen_cahn_fourier.yaml b/examples/allen_cahn/conf/allen_cahn_fourier.yaml new file mode 100644 index 0000000000..f2214131d1 --- /dev/null +++ b/examples/allen_cahn/conf/allen_cahn_fourier.yaml @@ -0,0 +1,96 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_allen_cahn_fourier/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working direcotry unchaned + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - INFER.pretrained_model_path + - mode + - output_dir + - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 100 + +DATA_PATH: ./dataset/allen_cahn.mat + +# model settings +MODEL: + input_keys: [t, x] + output_keys: [u] + num_layers: 4 + hidden_size: 256 + activation: tanh + periods: + x: [2.0, False] + fourier: + dim: 256 + scale: 1.0 + +# training settings +TRAIN: + epochs: 200 + iters_per_epoch: 1000 + save_freq: 10 + eval_during_train: true + eval_freq: 1 + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 1.0e-3 + gamma: 0.9 + decay_steps: 2000 + by_epoch: false + batch_size: 4096 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + batch_size: 4096 + +# inference settings +INFER: + pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/AllenCahn/allen_cahn_plain_pretrained.pdparams + export_path: ./inference/allen_cahn + pdmodel_path: ${INFER.export_path}.pdmodel + pdpiparams_path: ${INFER.export_path}.pdiparams + onnx_path: ${INFER.export_path}.onnx + device: gpu + engine: native + precision: fp32 + ir_optim: true + min_subgraph_size: 5 + gpu_mem: 2000 + gpu_id: 0 + max_batch_size: 1024 + num_cpu_threads: 10 + batch_size: 1024 diff --git a/examples/amgnet/amgnet_airfoil.py 
b/examples/amgnet/amgnet_airfoil.py index b25c7bee25..ff03ccbcae 100644 --- a/examples/amgnet/amgnet_airfoil.py +++ b/examples/amgnet/amgnet_airfoil.py @@ -109,16 +109,9 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, + optimizer=optimizer, validator=validator, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # train model solver.train() @@ -165,11 +158,8 @@ def evaluate(cfg: DictConfig): solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, - log_freq=cfg.log_freq, validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # evaluate model solver.eval() diff --git a/examples/amgnet/amgnet_cylinder.py b/examples/amgnet/amgnet_cylinder.py index ff2140eaa3..ca5c9f955a 100644 --- a/examples/amgnet/amgnet_cylinder.py +++ b/examples/amgnet/amgnet_cylinder.py @@ -109,16 +109,9 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, + optimizer=optimizer, validator=validator, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # train model solver.train() @@ -165,11 +158,8 @@ def evaluate(cfg: DictConfig): solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, - log_freq=cfg.log_freq, validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # evaluate model solver.eval() diff --git a/examples/amgnet/conf/amgnet_airfoil.yaml b/examples/amgnet/conf/amgnet_airfoil.yaml index dc13d528ed..371cdfdd03 100644 --- a/examples/amgnet/conf/amgnet_airfoil.yaml +++ b/examples/amgnet/conf/amgnet_airfoil.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/amgnet/conf/amgnet_cylinder.yaml b/examples/amgnet/conf/amgnet_cylinder.yaml index 8bae989744..ed2f47849a 100644 --- a/examples/amgnet/conf/amgnet_cylinder.yaml +++ b/examples/amgnet/conf/amgnet_cylinder.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/aneurysm/aneurysm.py b/examples/aneurysm/aneurysm.py index 2a19e4efb2..25cc5036c9 100644 --- a/examples/aneurysm/aneurysm.py +++ b/examples/aneurysm/aneurysm.py @@ -222,21 +222,11 @@ def inlet_w_ref_func(_in): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - log_freq=cfg.log_freq, - eval_during_train=True, - eval_freq=cfg.TRAIN.eval_freq, + optimizer=optimizer, equation=equation, validator=validator, visualizer=visualizer, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, - checkpoint_path=cfg.TRAIN.checkpoint_path, - 
eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # train model solver.train() @@ -317,12 +307,9 @@ def evaluate(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, - log_freq=cfg.log_freq, validator=validator, visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # evaluate solver.eval() @@ -337,7 +324,7 @@ def export(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - pretrained_model_path=cfg.INFER.pretrained_model_path, + cfg=cfg, ) # export model from paddle.static import InputSpec diff --git a/examples/aneurysm/aneurysm_flow.py b/examples/aneurysm/aneurysm_flow.py index 37a0c3e975..e42d0b17e6 100644 --- a/examples/aneurysm/aneurysm_flow.py +++ b/examples/aneurysm/aneurysm_flow.py @@ -196,15 +196,9 @@ def output_transform_p(self, in_, out): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - log_freq=cfg.log_freq, - epochs=cfg.TRAIN.epochs, - iters_per_epoch=int(x.shape[0] / cfg.TRAIN.batch_size), - save_freq=cfg.save_freq, + optimizer=optimizer, equation=equation, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, - checkpoint_path=cfg.TRAIN.checkpoint_path, + cfg=cfg, ) solver.train() @@ -276,10 +270,7 @@ def output_transform_p(self, in_, out): # initialize solver solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, - log_freq=cfg.log_freq, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) def model_predict( diff --git a/examples/aneurysm/conf/aneurysm.yaml b/examples/aneurysm/conf/aneurysm.yaml index 3c1586c451..fbac760c98 100644 --- a/examples/aneurysm/conf/aneurysm.yaml +++ b/examples/aneurysm/conf/aneurysm.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/aneurysm/conf/aneurysm_flow.yaml b/examples/aneurysm/conf/aneurysm_flow.yaml index 67bd9b0602..49c614f60d 100644 --- a/examples/aneurysm/conf/aneurysm_flow.yaml +++ b/examples/aneurysm/conf/aneurysm_flow.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/biharmonic2d/biharmonic2d.py b/examples/biharmonic2d/biharmonic2d.py index 20a1938ddc..54cb68a563 100644 --- a/examples/biharmonic2d/biharmonic2d.py +++ b/examples/biharmonic2d/biharmonic2d.py @@ -224,13 +224,7 @@ def train(cfg: DictConfig): solver_adam = ppsci.solver.Solver( disp_net, constraint, - cfg.output_dir, optimizer_adam, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - log_freq=cfg.log_freq, equation=equation, checkpoint_path=cfg.TRAIN.checkpoint_path, pretrained_model_path=cfg.TRAIN.pretrained_model_path, @@ -243,16 +237,11 @@ def train(cfg: DictConfig): solver_lbfgs = ppsci.solver.Solver( disp_net, constraint, - cfg.output_dir, optimizer_lbfgs, - None, - 1, - 1, - save_freq=cfg.TRAIN.save_freq, - log_freq=cfg.log_freq, + epochs=1, + iters_per_epoch=1, equation=equation, - checkpoint_path=cfg.TRAIN.checkpoint_path, - 
pretrained_model_path=cfg.TRAIN.pretrained_model_path, + cfg=cfg, ) # evaluate after finished training solver_lbfgs.train() @@ -264,7 +253,8 @@ def evaluate(cfg: DictConfig): # load pretrained model solver = ppsci.solver.Solver( - model=disp_net, pretrained_model_path=cfg.EVAL.pretrained_model_path + model=disp_net, + cfg=cfg, ) # generate samples @@ -345,7 +335,8 @@ def export(cfg: DictConfig): # load pretrained model solver = ppsci.solver.Solver( - model=disp_net, pretrained_model_path=cfg.INFER.pretrained_model_path + model=disp_net, + cfg=cfg, ) class Wrapped_Model(nn.Layer): diff --git a/examples/biharmonic2d/conf/biharmonic2d.yaml b/examples/biharmonic2d/conf/biharmonic2d.yaml index 70c5fab715..0a22886ee0 100644 --- a/examples/biharmonic2d/conf/biharmonic2d.yaml +++ b/examples/biharmonic2d/conf/biharmonic2d.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/bracket/bracket.py b/examples/bracket/bracket.py index 2907046424..f50ce51536 100644 --- a/examples/bracket/bracket.py +++ b/examples/bracket/bracket.py @@ -320,20 +320,11 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - log_freq=cfg.log_freq, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, + optimizer=optimizer, equation=equation, validator=validator, visualizer=visualizer, - checkpoint_path=cfg.TRAIN.checkpoint_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # train model solver.train() @@ -493,12 +484,9 @@ def evaluate(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, - log_freq=cfg.log_freq, validator=validator, visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # evaluate solver.eval() diff --git a/examples/bracket/conf/bracket.yaml b/examples/bracket/conf/bracket.yaml index fd6cef0fa8..4e34f7ead1 100644 --- a/examples/bracket/conf/bracket.yaml +++ b/examples/bracket/conf/bracket.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/bubble/bubble.py b/examples/bubble/bubble.py index b3126e9739..49c6f82467 100644 --- a/examples/bubble/bubble.py +++ b/examples/bubble/bubble.py @@ -179,14 +179,9 @@ def transform_out(in_, out): solver = ppsci.solver.Solver( model_list, constraint, - cfg.output_dir, - optimizer, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, + optimizer=optimizer, validator=validator, + cfg=cfg, ) # train model solver.train() @@ -339,9 +334,8 @@ def transform_out(in_, out): # directly evaluate pretrained model(optional) solver = ppsci.solver.Solver( model_list, - output_dir=cfg.output_dir, validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) solver.eval() diff --git a/examples/bubble/conf/bubble.yaml b/examples/bubble/conf/bubble.yaml index 
2e6212c38f..01be7207aa 100644 --- a/examples/bubble/conf/bubble.yaml +++ b/examples/bubble/conf/bubble.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/cfdgcn/cfdgcn.py b/examples/cfdgcn/cfdgcn.py index b63278e4be..fe61ea43ed 100644 --- a/examples/cfdgcn/cfdgcn.py +++ b/examples/cfdgcn/cfdgcn.py @@ -114,17 +114,9 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, + optimizer=optimizer, validator=validator, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - checkpoint_path=cfg.TRAIN.checkpoint_path, + cfg=cfg, ) # train model @@ -206,11 +198,8 @@ def evaluate(cfg: DictConfig): solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, - log_freq=cfg.log_freq, validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # evaluate model diff --git a/examples/cfdgcn/conf/cfdgcn.yaml b/examples/cfdgcn/conf/cfdgcn.yaml index a7c945461f..c84cd2a423 100644 --- a/examples/cfdgcn/conf/cfdgcn.yaml +++ b/examples/cfdgcn/conf/cfdgcn.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/chip_heat/chip_heat.py b/examples/chip_heat/chip_heat.py index 775231733e..59a6d6807b 100644 --- a/examples/chip_heat/chip_heat.py +++ b/examples/chip_heat/chip_heat.py @@ -498,14 +498,9 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, + optimizer=optimizer, validator=validator, + cfg=cfg, ) # train model solver.train() @@ -708,9 +703,8 @@ def evaluate(cfg: DictConfig): # directly evaluate pretrained model(optional) solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) solver.eval() # visualize prediction result diff --git a/examples/chip_heat/conf/chip_heat.yaml b/examples/chip_heat/conf/chip_heat.yaml index e8d76a5969..11807dc9c5 100644 --- a/examples/chip_heat/conf/chip_heat.yaml +++ b/examples/chip_heat/conf/chip_heat.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/control_arm/conf/forward_analysis.yaml b/examples/control_arm/conf/forward_analysis.yaml index 9690391509..b54d2e824a 100644 --- a/examples/control_arm/conf/forward_analysis.yaml +++ b/examples/control_arm/conf/forward_analysis.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: 
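+ # the `defaults` list added above composes this file with the packaged base + # configs (ppsci_default plus the TRAIN/EVAL/INFER and ema/swa groups); with + # `_self_` last, values written in this file take precedence over those bases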
# dynamic output directory according to running time and override name diff --git a/examples/control_arm/conf/inverse_parameter.yaml b/examples/control_arm/conf/inverse_parameter.yaml index fbe130a19b..1b96745203 100644 --- a/examples/control_arm/conf/inverse_parameter.yaml +++ b/examples/control_arm/conf/inverse_parameter.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/control_arm/forward_analysis.py b/examples/control_arm/forward_analysis.py index 267a7966e5..8b26abb530 100644 --- a/examples/control_arm/forward_analysis.py +++ b/examples/control_arm/forward_analysis.py @@ -179,19 +179,10 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model_list, constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, + optimizer=optimizer, equation=equation, - save_freq=cfg.TRAIN.save_freq, - log_freq=cfg.log_freq, - eval_freq=cfg.TRAIN.eval_freq, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_with_no_grad=cfg.TRAIN.eval_with_no_grad, visualizer=visualizer, - checkpoint_path=cfg.TRAIN.checkpoint_path, + cfg=cfg, ) # train model @@ -253,11 +244,8 @@ def evaluate(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model_list, - output_dir=cfg.output_dir, - log_freq=cfg.log_freq, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) # visualize prediction after finished training diff --git a/examples/control_arm/inverse_parameter.py b/examples/control_arm/inverse_parameter.py index 3b9a984416..cf060c1f1a 100644 --- a/examples/control_arm/inverse_parameter.py +++ b/examples/control_arm/inverse_parameter.py @@ -129,20 +129,11 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, + optimizer=optimizer, equation=equation, - save_freq=cfg.TRAIN.save_freq, - log_freq=cfg.log_freq, - eval_freq=cfg.TRAIN.eval_freq, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_with_no_grad=cfg.TRAIN.eval_with_no_grad, validator=validator, visualizer=visualizer, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, + cfg=cfg, ) # train model @@ -225,12 +216,9 @@ def evaluate(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, - log_freq=cfg.log_freq, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, validator=validator, visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) # evaluate after finished training solver.eval() diff --git a/examples/convection_diffusion/conf/5_best.yaml b/examples/convection_diffusion/conf/5_best.yaml new file mode 100644 index 0000000000..43f33bb9ba --- /dev/null +++ b/examples/convection_diffusion/conf/5_best.yaml @@ -0,0 +1,90 @@ +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_case5/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + 
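# keys listed above are dropped from ${hydra.job.override_dirname}, so + # overriding e.g. a checkpoint path does not spawn a new output directory +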
sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +log_grad_norm: false +log_loss: false + +mode: train +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 20 +LAMBDA: 0.0625 +EPS: 10.0 +DA: 10.0 +K0: 1.0 +PE: 30.0 +l: 2 +L: 10 +H: 1.0 +T: 5 +DT: 0.1 +NY: 25 + +MT: 2 +FDM_C_PATH: ./datasets/case5_fdm_reaction_txyc.csv +FDM_B_PATH: ./datasets/case5_fdm_reaction_txby.csv +MODEL: + model_c: + input_keys: + - t + - x + - 'y' + output_keys: + - c10 + num_layers: 4 + hidden_size: 64 + output_dim: 10 + activation: tanh + model_b: + input_keys: + - t + - x + - 'y' + output_keys: + - b + num_layers: 1 + hidden_size: 16 + output_dim: 1 + activation: tanh +TRAIN: + epochs: 3000 + l_bfgs_epochs: 150 + iters_per_epoch: 1 + eval_during_train: true + eval_freq: 500 + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 0.001 + gamma: 0.1 + by_epoch: true + weight: + IC: 100 + BC: 2000 + EQ: 30 + AD: 0.03 + pretrained_model_path: null +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + batch_size: + metric_c: 8192 + metric_b: 8192 diff --git a/examples/convection_diffusion/conf/6.yaml b/examples/convection_diffusion/conf/6.yaml new file mode 100644 index 0000000000..171deaee6e --- /dev/null +++ b/examples/convection_diffusion/conf/6.yaml @@ -0,0 +1,89 @@ +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_case6/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +log_grad_norm: false +log_loss: false + +mode: train +seed: 2023 +output_dir: ${hydra:run.dir} +log_freq: 20 +LAMBDA: 0.0625 +EPS: 0.1 +DA: 0.1 +K0: 1.0 +PE: 30.0 +l: 2 +L: 10 +H: 1.0 +T: 250 +DT: 5 +NY: 25 +FDM_C_PATH: ./datasets/case6_fdm_reaction_txyc.csv +FDM_B_PATH: ./datasets/case6_fdm_reaction_txby.csv +MODEL: + model_c: + input_keys: + - t + - x + - 'y' + output_keys: + - c10 + num_layers: 4 + hidden_size: 64 + output_dim: 10 + activation: tanh + model_b: + input_keys: + - t + - x + - 'y' + output_keys: + - b + num_layers: 1 + hidden_size: 16 + output_dim: 1 + activation: tanh +TRAIN: + RUN_LBFGS: false + epochs: 3000 + l_bfgs_epochs: 150 + iters_per_epoch: 1 + eval_during_train: true + eval_freq: 500 + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 0.001 + gamma: 0.1 + by_epoch: true + weight: + IC: 1.0e2 # key parameter + BC: 1.0e2 # key parameter + EQ: 1.0e3 # key parameter + AD: 1.0e3 # key parameter + pretrained_model_path: null +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + batch_size: + metric_c: 8192 + metric_b: 8192 diff --git a/examples/darcy/conf/darcy2d.yaml b/examples/darcy/conf/darcy2d.yaml index 1efbd856a9..39dc0bcb30 100644 --- a/examples/darcy/conf/darcy2d.yaml +++ b/examples/darcy/conf/darcy2d.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/darcy/darcy2d.py b/examples/darcy/darcy2d.py index c7d45441f1..079220aead 100644 ---
a/examples/darcy/darcy2d.py +++ b/examples/darcy/darcy2d.py @@ -143,16 +143,11 @@ def poisson_ref_compute_func(_in): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, + optimizer=optimizer, equation=equation, validator=validator, visualizer=visualizer, + cfg=cfg, ) # train model solver.train() @@ -173,14 +168,14 @@ def poisson_ref_compute_func(_in): constraint, OUTPUT_DIR, optimizer_lbfgs, - None, - EPOCHS, - cfg.TRAIN.lbfgs.iters_per_epoch, + epochs=EPOCHS, + iters_per_epoch=cfg.TRAIN.lbfgs.iters_per_epoch, eval_during_train=cfg.TRAIN.lbfgs.eval_during_train, eval_freq=cfg.TRAIN.lbfgs.eval_freq, equation=equation, validator=validator, visualizer=visualizer, + cfg=cfg, ) # train model solver.train() @@ -270,11 +265,10 @@ def poisson_ref_compute_func(_in): solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, equation=equation, validator=validator, visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) solver.eval() # visualize prediction diff --git a/examples/deepcfd/conf/deepcfd.yaml b/examples/deepcfd/conf/deepcfd.yaml index 92ca6401f2..290e160d61 100644 --- a/examples/deepcfd/conf/deepcfd.yaml +++ b/examples/deepcfd/conf/deepcfd.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/deepcfd/deepcfd.py b/examples/deepcfd/deepcfd.py index 414a753bed..cb1df30c52 100644 --- a/examples/deepcfd/deepcfd.py +++ b/examples/deepcfd/deepcfd.py @@ -306,14 +306,9 @@ def metric_expr( solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - epochs=cfg.TRAIN.epochs, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, + optimizer=optimizer, validator=validator, - checkpoint_path=cfg.TRAIN.checkpoint_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # train model @@ -411,10 +406,8 @@ def metric_expr( # initialize solver solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # evaluate diff --git a/examples/deephpms/burgers.py b/examples/deephpms/burgers.py index de1b528a57..dee4325d26 100644 --- a/examples/deephpms/burgers.py +++ b/examples/deephpms/burgers.py @@ -153,13 +153,9 @@ def transform_f_sol(_in): solver = ppsci.solver.Solver( model_list, constraint_idn, - cfg.output_dir, - optimizer_idn, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_idn, validator=validator_idn, + cfg=cfg, ) # train model @@ -217,13 +213,9 @@ def transform_f_sol(_in): solver = ppsci.solver.Solver( model_list, constraint_pde, - cfg.output_dir, - optimizer_pde, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_pde, validator=validator_pde, + cfg=cfg, ) # train model @@ -318,13 +310,9 @@ def transform_f_sol(_in): solver = ppsci.solver.Solver( model_list, constraint_sol, - cfg.output_dir, - optimizer_sol, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - 
eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_sol, validator=validator_sol, + cfg=cfg, ) # train model diff --git a/examples/deephpms/conf/burgers.yaml b/examples/deephpms/conf/burgers.yaml index 0053252ff9..d99b716ef4 100644 --- a/examples/deephpms/conf/burgers.yaml +++ b/examples/deephpms/conf/burgers.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/deephpms/conf/korteweg_de_vries.yaml b/examples/deephpms/conf/korteweg_de_vries.yaml index 9c78dedbea..9e56591ccd 100644 --- a/examples/deephpms/conf/korteweg_de_vries.yaml +++ b/examples/deephpms/conf/korteweg_de_vries.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/deephpms/conf/kuramoto_sivashinsky.yaml b/examples/deephpms/conf/kuramoto_sivashinsky.yaml index 41308852c4..96016e578a 100644 --- a/examples/deephpms/conf/kuramoto_sivashinsky.yaml +++ b/examples/deephpms/conf/kuramoto_sivashinsky.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/deephpms/conf/navier_stokes.yaml b/examples/deephpms/conf/navier_stokes.yaml index 14d3e0ea5b..dd4688b9fc 100644 --- a/examples/deephpms/conf/navier_stokes.yaml +++ b/examples/deephpms/conf/navier_stokes.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/deephpms/conf/schrodinger.yaml b/examples/deephpms/conf/schrodinger.yaml index 5d0fb77da9..512240920f 100644 --- a/examples/deephpms/conf/schrodinger.yaml +++ b/examples/deephpms/conf/schrodinger.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/deephpms/korteweg_de_vries.py b/examples/deephpms/korteweg_de_vries.py index 8ab06e6092..67b7d2d242 100644 --- a/examples/deephpms/korteweg_de_vries.py +++ b/examples/deephpms/korteweg_de_vries.py @@ -160,13 +160,9 @@ def transform_f_sol(_in): solver = ppsci.solver.Solver( model_list, constraint_idn, - cfg.output_dir, optimizer_idn, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, validator=validator_idn, + cfg=cfg, ) # train model @@ -224,13 +220,9 @@ def transform_f_sol(_in): solver = ppsci.solver.Solver( model_list, constraint_pde, - cfg.output_dir, - optimizer_pde, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_pde, validator=validator_pde, + cfg=cfg, ) # train model @@ -325,13 +317,9 @@ def transform_f_sol(_in): solver = ppsci.solver.Solver( 
model_list, constraint_sol, - cfg.output_dir, - optimizer_sol, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_sol, validator=validator_sol, + cfg=cfg, ) # train model diff --git a/examples/deephpms/kuramoto_sivashinsky.py b/examples/deephpms/kuramoto_sivashinsky.py index 6d324eea75..90e5b2c4e7 100644 --- a/examples/deephpms/kuramoto_sivashinsky.py +++ b/examples/deephpms/kuramoto_sivashinsky.py @@ -163,13 +163,9 @@ def transform_f_idn(_in): solver = ppsci.solver.Solver( model_list, constraint_idn, - cfg.output_dir, - optimizer_idn, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_idn, validator=validator_idn, + cfg=cfg, ) # train model @@ -227,13 +223,9 @@ def transform_f_idn(_in): solver = ppsci.solver.Solver( model_list, constraint_pde, - cfg.output_dir, - optimizer_pde, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_pde, validator=validator_pde, + cfg=cfg, ) # train model @@ -325,13 +317,9 @@ def transform_f_idn(_in): solver = ppsci.solver.Solver( model_list, constraint_sol, - cfg.output_dir, - optimizer_idn, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_idn, validator=validator_sol, + cfg=cfg, ) # train model diff --git a/examples/deephpms/navier_stokes.py b/examples/deephpms/navier_stokes.py index 9e0cf9e8ff..42a0f8bf4d 100644 --- a/examples/deephpms/navier_stokes.py +++ b/examples/deephpms/navier_stokes.py @@ -158,13 +158,9 @@ def transform_f(_in): solver = ppsci.solver.Solver( model_list, constraint_idn, - cfg.output_dir, - optimizer_idn, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_idn, validator=validator_idn, + cfg=cfg, ) # train model @@ -237,13 +233,9 @@ def transform_f(_in): solver = ppsci.solver.Solver( model_list, constraint_pde, - cfg.output_dir, - optimizer_pde, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_pde, validator=validator_pde, + cfg=cfg, ) # train model @@ -338,13 +330,9 @@ def transform_f(_in): solver = ppsci.solver.Solver( model_list, constraint_sol, - cfg.output_dir, - optimizer_idn, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_idn, validator=validator_sol, + cfg=cfg, ) # train model diff --git a/examples/deephpms/schrodinger.py b/examples/deephpms/schrodinger.py index 36fcf2913b..e79563fd1f 100644 --- a/examples/deephpms/schrodinger.py +++ b/examples/deephpms/schrodinger.py @@ -194,13 +194,9 @@ def transform_fg(_in): solver = ppsci.solver.Solver( model_list, constraint_idn, - cfg.output_dir, - optimizer_idn, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_idn, validator=validator_idn, + cfg=cfg, ) # train model @@ -273,13 +269,9 @@ def transform_fg(_in): solver = ppsci.solver.Solver( model_list, constraint_pde, - cfg.output_dir, - optimizer_pde, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_pde, validator=validator_pde, + cfg=cfg, ) # train model @@ -389,13 +381,9 @@ def transform_fg(_in): solver = ppsci.solver.Solver( model_list, 
constraint_sol, - cfg.output_dir, - optimizer_idn, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_idn, validator=validator_sol, + cfg=cfg, ) # train model diff --git a/examples/dgmr/conf/dgmr.yaml b/examples/dgmr/conf/dgmr.yaml index 30f6d00c07..f33785b4dd 100644 --- a/examples/dgmr/conf/dgmr.yaml +++ b/examples/dgmr/conf/dgmr.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/dgmr/dgmr.py b/examples/dgmr/dgmr.py index 52bd413323..95ceb9b336 100644 --- a/examples/dgmr/dgmr.py +++ b/examples/dgmr/dgmr.py @@ -202,7 +202,7 @@ def evaluate(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) solver.model.eval() diff --git a/examples/epnn/conf/epnn.yaml b/examples/epnn/conf/epnn.yaml index ed7d8f0ba4..081220f830 100644 --- a/examples/epnn/conf/epnn.yaml +++ b/examples/epnn/conf/epnn.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/epnn/epnn.py b/examples/epnn/epnn.py index 8a65663f13..139a817c5e 100755 --- a/examples/epnn/epnn.py +++ b/examples/epnn/epnn.py @@ -98,15 +98,9 @@ def _transform_in_stress(_in): solver = ppsci.solver.Solver( model_list_obj, constraint_pde, - cfg.output_dir, - optimizer_list, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_list, validator=validator_pde, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # train model @@ -171,10 +165,8 @@ def _transform_in_stress(_in): # initialize solver solver = ppsci.solver.Solver( model_list_obj, - output_dir=cfg.output_dir, validator=validator_pde, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # evaluate solver.eval() diff --git a/examples/euler_beam/conf/euler_beam.yaml b/examples/euler_beam/conf/euler_beam.yaml index 1937b53dd4..4802f4cbb3 100644 --- a/examples/euler_beam/conf/euler_beam.yaml +++ b/examples/euler_beam/conf/euler_beam.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/euler_beam/euler_beam.py b/examples/euler_beam/euler_beam.py index 1ca12acc54..0ea84d33f9 100644 --- a/examples/euler_beam/euler_beam.py +++ b/examples/euler_beam/euler_beam.py @@ -108,19 +108,11 @@ def u_solution_func(out): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - epochs=cfg.TRAIN.epochs, - iters_per_epoch=cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, + optimizer=optimizer, equation=equation, validator=validator, visualizer=visualizer, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, - checkpoint_path=cfg.TRAIN.checkpoint_path, - 
eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - to_static=cfg.to_static, + cfg=cfg, ) # train model solver.train() @@ -179,15 +171,10 @@ def u_solution_func(out): # initialize solver solver = ppsci.solver.Solver( model, - None, - cfg.output_dir, - None, equation=equation, validator=validator, visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - to_static=cfg.to_static, + cfg=cfg, ) # evaluate after finished training solver.eval() @@ -202,7 +189,7 @@ def export(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - pretrained_model_path=cfg.INFER.pretrained_model_path, + cfg=cfg, ) # export model from paddle.static import InputSpec diff --git a/examples/fourcastnet/conf/fourcastnet_finetune.yaml b/examples/fourcastnet/conf/fourcastnet_finetune.yaml index 91e2372df4..b593d684fd 100644 --- a/examples/fourcastnet/conf/fourcastnet_finetune.yaml +++ b/examples/fourcastnet/conf/fourcastnet_finetune.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/fourcastnet/conf/fourcastnet_precip.yaml b/examples/fourcastnet/conf/fourcastnet_precip.yaml index 6ac43b9244..cf9f428f2e 100644 --- a/examples/fourcastnet/conf/fourcastnet_precip.yaml +++ b/examples/fourcastnet/conf/fourcastnet_precip.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/fourcastnet/conf/fourcastnet_pretrain.yaml b/examples/fourcastnet/conf/fourcastnet_pretrain.yaml index 902b785b7f..858b316e93 100644 --- a/examples/fourcastnet/conf/fourcastnet_pretrain.yaml +++ b/examples/fourcastnet/conf/fourcastnet_pretrain.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/fourcastnet/train_finetune.py b/examples/fourcastnet/train_finetune.py index 5bb34ad9c5..70b1779ab9 100644 --- a/examples/fourcastnet/train_finetune.py +++ b/examples/fourcastnet/train_finetune.py @@ -162,16 +162,9 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - ITERS_PER_EPOCH, - eval_during_train=True, + optimizer=optimizer, validator=validator, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, - compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # train model solver.train() @@ -298,12 +291,9 @@ def output_wind_func(d, var_name, data_mean, data_std): solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, validator=validator, visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) solver.eval() # visualize prediction from pretrained_model_path diff --git a/examples/fourcastnet/train_precip.py b/examples/fourcastnet/train_precip.py index 
069f10c227..88f41646f6 100644 --- a/examples/fourcastnet/train_precip.py +++ b/examples/fourcastnet/train_precip.py @@ -159,15 +159,9 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - ITERS_PER_EPOCH, - eval_during_train=True, + optimizer=optimizer, validator=validator, - compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # train model solver.train() @@ -296,12 +290,9 @@ def output_precip_func(d, var_name): solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, validator=validator, visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) solver.eval() # visualize prediction diff --git a/examples/fourcastnet/train_pretrain.py b/examples/fourcastnet/train_pretrain.py index b3be2d93c9..ef0c9e3b57 100644 --- a/examples/fourcastnet/train_pretrain.py +++ b/examples/fourcastnet/train_pretrain.py @@ -152,15 +152,9 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - ITERS_PER_EPOCH, - eval_during_train=True, + optimizer=optimizer, validator=validator, - compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # train model solver.train() @@ -228,12 +222,8 @@ def evaluate(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, - log_freq=cfg.log_freq, validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # evaluate solver.eval() diff --git a/examples/fpde/fractional_poisson_2d.py b/examples/fpde/fractional_poisson_2d.py index b95230ae3a..86d88bb883 100644 --- a/examples/fpde/fractional_poisson_2d.py +++ b/examples/fpde/fractional_poisson_2d.py @@ -19,32 +19,50 @@ from typing import Tuple from typing import Union +import hydra import numpy as np import paddle from matplotlib import cm from matplotlib import pyplot as plt +from omegaconf import DictConfig import ppsci -from ppsci.utils import config -from ppsci.utils import logger -if __name__ == "__main__": - args = config.parse_args() - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(42) - # set training hyper-parameters - EPOCHS = 20000 if not args.epochs else args.epochs - ITERS_PER_EPOCH = 1 +def plot(x, y, input_data, output_data, label_data): + fig = plt.figure() + # plot prediction + ax1 = fig.add_subplot(121, projection="3d") + surf1 = ax1.plot_surface( + x, y, output_data["u"], cmap=cm.jet, linewidth=0, antialiased=False + ) + ax1.set_zlim(0, 1.2) + ax1.set_xlabel(r"$x$") + ax1.set_ylabel(r"$y$") + ax1.set_zlabel(r"$z$") + ax1.set_title(r"$u(x,y), prediction$") + fig.colorbar(surf1, ax=ax1, aspect=5, orientation="horizontal") - # set output directory - OUTPUT_DIR = ( - "./output_fractional_poisson_2d" if not args.output_dir else args.output_dir + # plot label + ax2 = fig.add_subplot(122, projection="3d") + surf2 = ax2.plot_surface( + x, y, label_data, cmap=cm.jet, linewidth=0, antialiased=False ) - logger.init_logger("ppsci", f"{OUTPUT_DIR}/train.log", "info") + ax2.set_zlim(0, 1.2) + ax2.set_xlabel("x") + ax2.set_ylabel("y") + ax2.set_zlabel("z") +
ax2.set_title(r"$u(x,y), label$") + + # Add a color bar which maps values to colors. + fig.colorbar(surf2, ax=ax2, aspect=5, orientation="horizontal") + fig.subplots_adjust(wspace=0.5, hspace=0.5) + plt.savefig("fractional_poisson_2d_result.png", dpi=400) + +def train(cfg: DictConfig): # set model - model = ppsci.arch.MLP(("x", "y"), ("u",), 4, 20) + model = ppsci.arch.MLP(**cfg.MODEL) def output_transform(in_, out): return {"u": (1 - (in_["x"] ** 2 + in_["y"] ** 2)) * out["u"]} @@ -55,19 +73,19 @@ def output_transform(in_, out): geom = {"disk": ppsci.geometry.Disk((0, 0), 1)} # set equation - ALPHA = 1.8 - equation = {"fpde": ppsci.equation.FractionalPoisson(ALPHA, geom["disk"], [8, 100])} + equation = { + "fpde": ppsci.equation.FractionalPoisson(cfg.ALPHA, geom["disk"], [8, 100]) + } # set constraint - NPOINT_INTERIOR = 100 - NPOINT_BC = 1 - def u_solution_func( out: Dict[str, Union[paddle.Tensor, np.ndarray]] ) -> Union[paddle.Tensor, np.ndarray]: if isinstance(out["x"], paddle.Tensor): - return paddle.abs(1 - (out["x"] ** 2 + out["y"] ** 2)) ** (1 + ALPHA / 2) - return np.abs(1 - (out["x"] ** 2 + out["y"] ** 2)) ** (1 + ALPHA / 2) + return paddle.abs(1 - (out["x"] ** 2 + out["y"] ** 2)) ** ( + 1 + cfg.ALPHA / 2 + ) + return np.abs(1 - (out["x"] ** 2 + out["y"] ** 2)) ** (1 + cfg.ALPHA / 2) # set transform for input data def input_data_fpde_transform( @@ -114,8 +132,8 @@ def input_data_fpde_transform( }, ), }, - "batch_size": NPOINT_INTERIOR, - "iters_per_epoch": ITERS_PER_EPOCH, + "batch_size": cfg.NPOINT_INTERIOR, + "iters_per_epoch": cfg.TRAIN.iters_per_epoch, }, ppsci.loss.MSELoss("mean"), random="Hammersley", @@ -128,11 +146,10 @@ def input_data_fpde_transform( geom["disk"], { "dataset": {"name": "IterableNamedArrayDataset"}, - "batch_size": NPOINT_BC, - "iters_per_epoch": ITERS_PER_EPOCH, + "batch_size": cfg.NPOINT_BC, + "iters_per_epoch": cfg.TRAIN.iters_per_epoch, }, ppsci.loss.MSELoss("mean"), - random="Hammersley", criteria=lambda x, y: np.isclose(x, -1), name="BC", ) @@ -143,18 +160,16 @@ def input_data_fpde_transform( } # set optimizer - optimizer = ppsci.optimizer.Adam(1e-3)(model) + optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model) # set validator - NPOINT_EVAL = 1000 - EVAL_FREQ = 1000 l2rel_metric = ppsci.validate.GeometryValidator( {"u": lambda out: out["u"]}, {"u": u_solution_func}, geom["disk"], { "dataset": "IterableNamedArrayDataset", - "total_size": NPOINT_EVAL, + "total_size": cfg.NPOINT_EVAL, }, ppsci.loss.MSELoss(), metric={"L2Rel": ppsci.metric.L2Rel()}, @@ -166,15 +181,10 @@ def input_data_fpde_transform( solver = ppsci.solver.Solver( model, constraint, - OUTPUT_DIR, - optimizer, - epochs=EPOCHS, - iters_per_epoch=ITERS_PER_EPOCH, - eval_during_train=True, - eval_freq=EVAL_FREQ, + optimizer=optimizer, equation=equation, validator=validator, - eval_with_no_grad=True, + cfg=cfg, ) # train model solver.train() @@ -193,32 +203,80 @@ def input_data_fpde_transform( label_data = u_solution_func(input_data).reshape([x.shape[0], -1]) output_data = solver.predict(input_data, return_numpy=True) output_data = {k: v.reshape([x.shape[0], -1]) for k, v in output_data.items()} + plot(x, y, input_data, output_data, label_data) + +def evaluate(cfg: DictConfig): + # load model + model = ppsci.load_model(cfg.pretrained_model_path) + # set geometry + geom = { "disk":
ppsci.geometry.Disk(np.array([0, 0]), np.array([1]), np.array([[0]])), + } + + def u_solution_func( + out: Dict[str, Union[paddle.Tensor, np.ndarray]] + ) -> Union[paddle.Tensor, np.ndarray]: + if isinstance(out["x"], paddle.Tensor): + return paddle.abs(1 - (out["x"] ** 2 + out["y"] ** 2)) ** ( + 1 + cfg.ALPHA / 2 + ) + return np.abs(1 - (out["x"] ** 2 + out["y"] ** 2)) ** (1 + cfg.ALPHA / 2) + + # set validator + l2rel_metric = ppsci.validate.GeometryValidator( + {"u": lambda out: out["u"]}, + {"u": u_solution_func}, + geom["disk"], + { + "dataset": "IterableNamedArrayDataset", + "total_size": cfg.NPOINT_EVAL, + }, + ppsci.loss.MSELoss(), + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="L2Rel_Metric", ) - ax1.set_zlim(0, 1.2) - ax1.set_xlabel(r"$x$") - ax1.set_ylabel(r"$y$") - ax1.set_zlabel(r"$z$") - ax1.set_title(r"$u(x,y), label$") - fig.colorbar(surf1, ax=ax1, aspect=5, orientation="horizontal") + validator = {l2rel_metric.name: l2rel_metric} - # plot label - ax2 = fig.add_subplot(122, projection="3d") - surf2 = ax2.plot_surface( - x, y, label_data, cmap=cm.jet, linewidth=0, antialiased=False + # initialize solver + solver = ppsci.solver.Solver( + model, + validator=validator, + cfg=cfg, ) - ax2.set_zlim(0, 1.2) - ax2.set_xlabel("x") - ax2.set_ylabel("y") - ax2.set_zlabel("z") - ax2.set_title(r"$u(x,y), prediction$") + # evaluate model + solver.eval() - # Add a color bar which maps values to colors. - fig.colorbar(surf2, ax=ax2, aspect=5, orientation="horizontal") - fig.subplots_adjust(wspace=0.5, hspace=0.5) - plt.savefig("fractional_poisson_2d_result.png", dpi=400) + + # visualize prediction after evaluation + theta = np.arange(0, 2 * math.pi, 0.04) + rho = np.arange(0, 1, 0.005) + mt, mr = np.meshgrid(theta, rho) + x = mr * np.cos(mt) + y = mr * np.sin(mt) + + input_data = { + "x": x.reshape([-1, 1]), + "y": y.reshape([-1, 1]), + } + + label_data = u_solution_func(input_data).reshape([x.shape[0], -1]) + output_data = solver.predict(input_data, return_numpy=True) + output_data = {k: v.reshape([x.shape[0], -1]) for k, v in output_data.items()} + + plot(x, y, input_data, output_data, label_data) + + +@hydra.main( + version_base=None, config_path="./conf", config_name="fractional_poisson_2d.yaml" ) +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + else: + raise ValueError(f"cfg.mode should be in ['train', 'eval'], but got '{cfg.mode}'") + + +if __name__ == "__main__": + main() diff --git a/examples/fsi/conf/viv.yaml b/examples/fsi/conf/viv.yaml index 8eb3a0c38b..a55db7ea5f 100644 --- a/examples/fsi/conf/viv.yaml +++ b/examples/fsi/conf/viv.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -29,7 +38,6 @@ mode: train # running mode: train/eval seed: 42 output_dir: ${hydra:run.dir} log_freq: 20 -use_tbd: false VIV_DATA_PATH: "./VIV_Training_Neta100.mat" diff --git a/examples/fsi/viv.py b/examples/fsi/viv.py index d6b3fbf2ec..be6dc39e58 100644 --- a/examples/fsi/viv.py +++ b/examples/fsi/viv.py @@ -40,7 +40,6 @@ def train(cfg: DictConfig): "drop_last": False, "shuffle": True, }, - # "num_workers": 0, } # set constraint @@ -101,20 +100,11 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, -
cfg.TRAIN.iters_per_epoch, - use_tbd=cfg.use_tbd, - save_freq=cfg.TRAIN.save_freq, - log_freq=cfg.log_freq, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, + optimizer=optimizer, equation=equation, validator=validator, visualizer=visualizer, - checkpoint_path=cfg.TRAIN.checkpoint_path, + cfg=cfg, ) # train model @@ -176,11 +166,10 @@ def evaluate(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, equation=equation, validator=validator, visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) # evaluate @@ -201,7 +190,7 @@ def export(cfg: DictConfig): solver = ppsci.solver.Solver( model, equation=equation, - pretrained_model_path=cfg.INFER.pretrained_model_path, + cfg=cfg, ) # Convert equation to func f_func = ppsci.lambdify( diff --git a/examples/gpinn/conf/poisson_1d.yaml b/examples/gpinn/conf/poisson_1d.yaml index 5e1a7e8d21..93f3dd417e 100644 --- a/examples/gpinn/conf/poisson_1d.yaml +++ b/examples/gpinn/conf/poisson_1d.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/gpinn/poisson_1d.py b/examples/gpinn/poisson_1d.py index 840cc10d46..ecaa116b3d 100644 --- a/examples/gpinn/poisson_1d.py +++ b/examples/gpinn/poisson_1d.py @@ -130,17 +130,10 @@ def u_solution(in_): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, + optimizer=optimizer, equation=equation, validator=validator, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, - checkpoint_path=cfg.TRAIN.checkpoint_path, + cfg=cfg, ) # train model solver.train() @@ -253,9 +246,8 @@ def u_solution(in_): # initialize solver solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) # evaluate after finished training solver.eval() diff --git a/examples/heat_exchanger/conf/heat_exchanger.yaml b/examples/heat_exchanger/conf/heat_exchanger.yaml index 41b0c9f948..c94d9e050c 100644 --- a/examples/heat_exchanger/conf/heat_exchanger.yaml +++ b/examples/heat_exchanger/conf/heat_exchanger.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/heat_exchanger/heat_exchanger.py b/examples/heat_exchanger/heat_exchanger.py index c32d144fcd..ac9221ba07 100644 --- a/examples/heat_exchanger/heat_exchanger.py +++ b/examples/heat_exchanger/heat_exchanger.py @@ -330,15 +330,10 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, + optimizer=optimizer, equation=equation, validator=validator, + cfg=cfg, ) # train model solver.train() @@ -539,10 +534,9 @@ def evaluate(cfg: DictConfig): # directly evaluate pretrained model(optional) solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, 
equation=equation, validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) solver.eval() diff --git a/examples/heat_pinn/conf/heat_pinn.yaml b/examples/heat_pinn/conf/heat_pinn.yaml index d30987e1b3..0bb916e006 100644 --- a/examples/heat_pinn/conf/heat_pinn.yaml +++ b/examples/heat_pinn/conf/heat_pinn.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/heat_pinn/heat_pinn.py b/examples/heat_pinn/heat_pinn.py index 5cf81d1805..bf8945d3a5 100644 --- a/examples/heat_pinn/heat_pinn.py +++ b/examples/heat_pinn/heat_pinn.py @@ -111,15 +111,9 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - epochs=cfg.TRAIN.epochs, - iters_per_epoch=cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - log_freq=cfg.log_freq, + optimizer=optimizer, equation=equation, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, - checkpoint_path=cfg.TRAIN.checkpoint_path, + cfg=cfg, ) # train model solver.train() @@ -211,9 +205,7 @@ def evaluate(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, - log_freq=cfg.log_freq, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) # begin eval N_EVAL = 100 diff --git a/examples/hpinns/conf/hpinns.yaml b/examples/hpinns/conf/hpinns.yaml index 8176590751..e1f8619bbb 100644 --- a/examples/hpinns/conf/hpinns.yaml +++ b/examples/hpinns/conf/hpinns.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/hpinns/holography.py b/examples/hpinns/holography.py index 77bd3e7c79..6df02f4126 100644 --- a/examples/hpinns/holography.py +++ b/examples/hpinns/holography.py @@ -179,14 +179,9 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model_list, constraint, - cfg.output_dir, - optimizer_adam, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_adam, validator=validator, - checkpoint_path=cfg.TRAIN.checkpoint_path, + cfg=cfg, ) # train model @@ -204,14 +199,9 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model_list, constraint, - cfg.output_dir, - optimizer_lbfgs, - None, - cfg.TRAIN.epochs_lbfgs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_lbfgs, validator=validator, - checkpoint_path=cfg.TRAIN.checkpoint_path, + cfg=cfg, ) # train model @@ -254,14 +244,9 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model_list, constraint, - cfg.output_dir, - optimizer_lbfgs, - None, - cfg.TRAIN.epochs_lbfgs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, + optimizer=optimizer_lbfgs, validator=validator, - checkpoint_path=cfg.TRAIN.checkpoint_path, + cfg=cfg, ) # train model @@ -390,9 +375,8 @@ def evaluate(cfg: DictConfig): solver = ppsci.solver.Solver( model_list, - output_dir=cfg.output_dir, validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) # evaluate diff --git a/examples/ide/conf/volterra_ide.yaml 
b/examples/ide/conf/volterra_ide.yaml index bb449164e4..b8bd0c2a64 100644 --- a/examples/ide/conf/volterra_ide.yaml +++ b/examples/ide/conf/volterra_ide.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/ide/volterra_ide.py b/examples/ide/volterra_ide.py index 70d106a2b9..8f4e0c1a47 100644 --- a/examples/ide/volterra_ide.py +++ b/examples/ide/volterra_ide.py @@ -157,17 +157,10 @@ def u_solution_func(in_): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - epochs=cfg.TRAIN.epochs, - iters_per_epoch=cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, + optimizer=optimizer, equation=equation, validator=validator, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, - checkpoint_path=cfg.TRAIN.checkpoint_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # train model solver.train() @@ -217,10 +210,8 @@ def u_solution_func(in_) -> np.ndarray: # initialize solver solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # evaluate model solver.eval() @@ -246,7 +237,7 @@ def export(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - pretrained_model_path=cfg.INFER.pretrained_model_path, + cfg=cfg, ) # export model from paddle.static import InputSpec diff --git a/examples/laplace/conf/laplace2d.yaml b/examples/laplace/conf/laplace2d.yaml index b3bd6b8f54..3d787116f2 100644 --- a/examples/laplace/conf/laplace2d.yaml +++ b/examples/laplace/conf/laplace2d.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/laplace/laplace2d.py b/examples/laplace/laplace2d.py index 106cbf78f6..690e5ae4d0 100644 --- a/examples/laplace/laplace2d.py +++ b/examples/laplace/laplace2d.py @@ -106,15 +106,11 @@ def u_solution_func(out): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - epochs=cfg.TRAIN.epochs, - iters_per_epoch=cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, + optimizer=optimizer, equation=equation, validator=validator, visualizer=visualizer, + cfg=cfg, ) # train model solver.train() @@ -177,11 +173,10 @@ def u_solution_func(out): # initialize solver solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, equation=equation, validator=validator, visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) solver.eval() # visualize prediction @@ -195,7 +190,7 @@ def export(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - pretrained_model_path=cfg.INFER.pretrained_model_path, + cfg=cfg, ) # export model from paddle.static import InputSpec diff --git a/examples/ldc/conf/ldc2d_modulus.yaml b/examples/ldc/conf/ldc2d_modulus.yaml new file mode 100644 index 0000000000..83b06f9887 --- /dev/null +++ b/examples/ldc/conf/ldc2d_modulus.yaml @@ -0,0 +1,69 @@ +hydra: + run: + # dynamic output directory 
according to running time and override name + dir: outputs_modulus/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 2023 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# set working condition +NU: 0.01 +RHO: 1.0 + +# model settings +MODEL: + input_keys: ["x", "y"] + output_keys: ["u", "v", "p"] + num_layers: 6 + hidden_size: 512 + activation: "silu" + weight_norm: true + +# training settings +TRAIN: + epochs: 10 + iters_per_epoch: 1000 + save_freq: 0 + eval_during_train: true + eval_freq: 1 + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 0.001 + gamma: 0.95 + decay_steps: 4000 + by_epoch: false + batch_size: + bc_top: 1000 + bc_noslip: 1000 + pde: 4000 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + batch_size: 1024 diff --git a/examples/ldc/conf/ldc2d_modulus_importance_sampling.yaml b/examples/ldc/conf/ldc2d_modulus_importance_sampling.yaml new file mode 100644 index 0000000000..9586ce7953 --- /dev/null +++ b/examples/ldc/conf/ldc2d_modulus_importance_sampling.yaml @@ -0,0 +1,69 @@ +hydra: + run: + # dynamic output directory according to running time and override name + dir: output_ldc2d_importance_sampling/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 2023 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# set working condition +NU: 0.01 +RHO: 1.0 + +# model settings +MODEL: + input_keys: ["x", "y"] + output_keys: ["u", "v", "p"] + num_layers: 6 + hidden_size: 512 + activation: "silu" + weight_norm: true + +# training settings +TRAIN: + epochs: 10 + iters_per_epoch: 1000 + save_freq: 0 + eval_during_train: true + eval_freq: 1 + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 0.001 + gamma: 0.95 + decay_steps: 4000 + by_epoch: false + batch_size: + bc_top: 1000 + bc_noslip: 1000 + pde: 4000 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + batch_size: 1024 diff --git a/examples/ldc/conf/ldc2d_steady_Re10.yaml b/examples/ldc/conf/ldc2d_steady_Re10.yaml index 78ddd5028e..b6f1b35285 100644 --- a/examples/ldc/conf/ldc2d_steady_Re10.yaml +++ b/examples/ldc/conf/ldc2d_steady_Re10.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output
directory according to running time and override name diff --git a/examples/ldc/conf/ldc2d_unsteady_Re10.yaml b/examples/ldc/conf/ldc2d_unsteady_Re10.yaml index 12248c757c..bfe97e7bbc 100644 --- a/examples/ldc/conf/ldc2d_unsteady_Re10.yaml +++ b/examples/ldc/conf/ldc2d_unsteady_Re10.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/ldc/ldc2d_steady_Re10.py b/examples/ldc/ldc2d_steady_Re10.py index bbd139a35d..9dd6380563 100644 --- a/examples/ldc/ldc2d_steady_Re10.py +++ b/examples/ldc/ldc2d_steady_Re10.py @@ -139,17 +139,11 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, + optimizer=optimizer, equation=equation, validator=validator, visualizer=visualizer, - checkpoint_path=cfg.TRAIN.checkpoint_path, + cfg=cfg, ) # train model solver.train() @@ -209,11 +203,10 @@ def evaluate(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, equation=equation, validator=validator, visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) solver.eval() # visualize prediction for pretrained model(optional) diff --git a/examples/ldc/ldc2d_unsteady_Re10.py b/examples/ldc/ldc2d_unsteady_Re10.py index 9a509674f1..199b88da67 100644 --- a/examples/ldc/ldc2d_unsteady_Re10.py +++ b/examples/ldc/ldc2d_unsteady_Re10.py @@ -182,16 +182,11 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, + optimizer=optimizer, equation=equation, validator=validator, visualizer=visualizer, + cfg=cfg, ) # train model solver.train() @@ -283,11 +278,10 @@ def evaluate(cfg: DictConfig): # directly evaluate pretrained model(optional) solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, equation=equation, validator=validator, visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) solver.eval() # visualize prediction for pretrained model(optional) diff --git a/examples/lorenz/conf/enn.yaml b/examples/lorenz/conf/enn.yaml index c37d6275be..e343d57e2d 100644 --- a/examples/lorenz/conf/enn.yaml +++ b/examples/lorenz/conf/enn.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/lorenz/conf/transformer.yaml b/examples/lorenz/conf/transformer.yaml index 8d8117526b..f11bae5aa0 100644 --- a/examples/lorenz/conf/transformer.yaml +++ b/examples/lorenz/conf/transformer.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/lorenz/train_enn.py b/examples/lorenz/train_enn.py index 
88ad584994..91f988425e 100644 --- a/examples/lorenz/train_enn.py +++ b/examples/lorenz/train_enn.py @@ -130,13 +130,9 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - ITERS_PER_EPOCH, - eval_during_train=True, + optimizer=optimizer, validator=validator, + cfg=cfg, ) # train model solver.train() @@ -220,9 +216,8 @@ def evaluate(cfg: DictConfig): solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) solver.eval() diff --git a/examples/lorenz/train_transformer.py b/examples/lorenz/train_transformer.py index d889b7a767..c55cb3799a 100644 --- a/examples/lorenz/train_transformer.py +++ b/examples/lorenz/train_transformer.py @@ -147,15 +147,10 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - ITERS_PER_EPOCH, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, + optimizer=optimizer, validator=validator, visualizer=visualizer, + cfg=cfg, ) # train model solver.train() @@ -217,10 +212,9 @@ def evaluate(cfg: DictConfig): solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, validator=validator, visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) solver.eval() # visualize prediction for pretrained model(optional) @@ -241,7 +235,7 @@ def export(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - pretrained_model_path=cfg.INFER.pretrained_model_path, + cfg=cfg, ) # export model from paddle.static import InputSpec diff --git a/examples/nowcastnet/conf/nowcastnet.yaml b/examples/nowcastnet/conf/nowcastnet.yaml index a972592d2d..e9feed0910 100644 --- a/examples/nowcastnet/conf/nowcastnet.yaml +++ b/examples/nowcastnet/conf/nowcastnet.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/nowcastnet/nowcastnet.py b/examples/nowcastnet/nowcastnet.py index 8db3587d10..9bd6ee2119 100644 --- a/examples/nowcastnet/nowcastnet.py +++ b/examples/nowcastnet/nowcastnet.py @@ -51,7 +51,7 @@ def evaluate(cfg: DictConfig): solver = ppsci.solver.Solver( model, output_dir=output_dir, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) for batch_id, test_ims in enumerate(test_data_loader): diff --git a/examples/nsfnet/VP_NSFNet1.py b/examples/nsfnet/VP_NSFNet1.py index 3edefa7c9c..c2f3ee9420 100644 --- a/examples/nsfnet/VP_NSFNet1.py +++ b/examples/nsfnet/VP_NSFNet1.py @@ -186,18 +186,10 @@ def train(cfg: DictConfig): model=model, constraint=constraint, optimizer=optimizer, - epochs=EPOCHS, - lr_scheduler=lr_scheduler, - iters_per_epoch=ITERS_PER_EPOCH, - eval_during_train=False, - log_freq=cfg.log_freq, - eval_freq=cfg.eval_freq, - seed=SEED, equation=equation, validator=validator, visualizer=None, - eval_with_no_grad=False, - output_dir=OUTPUT_DIR, + cfg=cfg, ) # train model @@ -218,20 +210,13 @@ def train(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( - model=model, - constraint=constraint, + model, + constraint, optimizer=optimizer, - epochs=EPOCHS, - iters_per_epoch=ITERS_PER_EPOCH, - eval_during_train=False, - log_freq=2000, - eval_freq=2000, 
- seed=SEED, equation=equation, validator=validator, visualizer=None, - eval_with_no_grad=False, - output_dir=OUTPUT_DIR, + cfg=cfg, ) # train model solver.train() @@ -292,6 +277,7 @@ def evaluate(cfg: DictConfig): model, equation=equation, validator=validator, + cfg=cfg, ) # eval model diff --git a/examples/nsfnet/VP_NSFNet2.py b/examples/nsfnet/VP_NSFNet2.py index 8bd5d5aed6..e23189306c 100644 --- a/examples/nsfnet/VP_NSFNet2.py +++ b/examples/nsfnet/VP_NSFNet2.py @@ -7,7 +7,6 @@ from scipy.interpolate import griddata import ppsci -from ppsci.utils import logger @hydra.main(version_base=None, config_path="./conf", config_name="VP_NSFNet2.yaml") @@ -112,10 +111,7 @@ def load_data(path, N_TRAIN, NB_TRAIN, N0_TRAIN): def train(cfg: DictConfig): - OUTPUT_DIR = cfg.output_dir - # set random seed for reproducibility - SEED = cfg.seed ITERS_PER_EPOCH = cfg.iters_per_epoch # set model @@ -253,23 +249,15 @@ def train(cfg: DictConfig): )() optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) - logger.init_logger("ppsci", f"{OUTPUT_DIR}/eval.log", "info") # initialize solver solver = ppsci.solver.Solver( - model=model, - constraint=constraint, + model, + constraint, optimizer=optimizer, epochs=EPOCHS, - lr_scheduler=lr_scheduler, - iters_per_epoch=ITERS_PER_EPOCH, - eval_during_train=True, - log_freq=cfg.log_freq, - eval_freq=cfg.eval_freq, - seed=SEED, equation=equation, validator=validator, - visualizer=None, - eval_with_no_grad=False, + cfg=cfg, ) # train model solver.train() @@ -341,6 +329,7 @@ def evaluate(cfg: DictConfig): model, equation=equation, validator=validator, + cfg=cfg, ) # eval diff --git a/examples/nsfnet/VP_NSFNet3.py b/examples/nsfnet/VP_NSFNet3.py index bc6a991174..a717ecd0c7 100644 --- a/examples/nsfnet/VP_NSFNet3.py +++ b/examples/nsfnet/VP_NSFNet3.py @@ -316,19 +316,12 @@ def train(cfg: DictConfig): logger.init_logger("ppsci", f"{OUTPUT_DIR}/eval.log", "info") # initialize solver solver = ppsci.solver.Solver( - model=model, - constraint=constraint, + model, + constraint, optimizer=optimizer, - epochs=EPOCHS, - lr_scheduler=lr_scheduler, - iters_per_epoch=ITERS_PER_EPOCH, - eval_during_train=True, - log_freq=cfg.log_freq, - eval_freq=cfg.eval_freq, equation=equation, validator=validator, - visualizer=None, - eval_with_no_grad=False, + cfg=cfg, ) # train model solver.train() @@ -389,6 +382,7 @@ def evaluate(cfg: DictConfig): model, equation=equation, validator=validator, + cfg=cfg, ) # print the relative error diff --git a/examples/nsfnet/VP_NSFNet4.py b/examples/nsfnet/VP_NSFNet4.py index 4ee56ce574..4481787644 100644 --- a/examples/nsfnet/VP_NSFNet4.py +++ b/examples/nsfnet/VP_NSFNet4.py @@ -283,20 +283,12 @@ def train(cfg: DictConfig): optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) # initialize solver solver = ppsci.solver.Solver( - model=model, - constraint=constraint, - output_dir=cfg.output_dir, + model, + constraint, optimizer=optimizer, - lr_scheduler=lr_scheduler, - epochs=cfg.epochs, - iters_per_epoch=cfg.TRAIN.lr_scheduler.iters_per_epoch, - log_freq=cfg.TRAIN.log_freq, - save_freq=cfg.TRAIN.save_freq, - eval_freq=cfg.TRAIN.eval_freq, - eval_during_train=True, equation=equation, validator=validator, - eval_with_no_grad=cfg.TRAIN.eval_with_no_grad, + cfg=cfg, ) # train model solver.train() @@ -453,7 +445,8 @@ def export(cfg: DictConfig): # load pretrained model solver = ppsci.solver.Solver( - model=model, pretrained_model_path=cfg.INFER.pretrained_model_path + model=model, + cfg=cfg, ) # export models diff --git a/examples/nsfnet/conf/VP_NSFNet4.yaml 
b/examples/nsfnet/conf/VP_NSFNet4.yaml index 258ace18d0..6a0241b7fe 100644 --- a/examples/nsfnet/conf/VP_NSFNet4.yaml +++ b/examples/nsfnet/conf/VP_NSFNet4.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/operator_learning/conf/deeponet.yaml b/examples/operator_learning/conf/deeponet.yaml index bf81184375..725357f45c 100644 --- a/examples/operator_learning/conf/deeponet.yaml +++ b/examples/operator_learning/conf/deeponet.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/operator_learning/deeponet.py b/examples/operator_learning/deeponet.py index 79ffa56745..4b1d85fa8c 100644 --- a/examples/operator_learning/deeponet.py +++ b/examples/operator_learning/deeponet.py @@ -64,17 +64,9 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - eval_freq=cfg.TRAIN.eval_freq, - log_freq=cfg.log_freq, + optimizer=optimizer, validator=validator, - eval_during_train=cfg.TRAIN.eval_during_train, - checkpoint_path=cfg.TRAIN.checkpoint_path, + cfg=cfg, ) # train model solver.train() @@ -168,11 +160,8 @@ def evaluate(cfg: DictConfig): solver = ppsci.solver.Solver( model, - None, - cfg.output_dir, validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) solver.eval() diff --git a/examples/phycrnet/conf/burgers_equations.yaml b/examples/phycrnet/conf/burgers_equations.yaml index 700419936a..d8b4e7fee8 100644 --- a/examples/phycrnet/conf/burgers_equations.yaml +++ b/examples/phycrnet/conf/burgers_equations.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/phycrnet/conf/fitzhugh_nagumo_RD_equation.yaml b/examples/phycrnet/conf/fitzhugh_nagumo_RD_equation.yaml index dd7a1d54d8..b0517be183 100644 --- a/examples/phycrnet/conf/fitzhugh_nagumo_RD_equation.yaml +++ b/examples/phycrnet/conf/fitzhugh_nagumo_RD_equation.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/phycrnet/conf/lambda_omega_RD_equation.yaml b/examples/phycrnet/conf/lambda_omega_RD_equation.yaml index 125a4e289c..037e5578c4 100644 --- a/examples/phycrnet/conf/lambda_omega_RD_equation.yaml +++ b/examples/phycrnet/conf/lambda_omega_RD_equation.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/phycrnet/main.py 
b/examples/phycrnet/main.py index 21821d0865..ce9017488d 100644 --- a/examples/phycrnet/main.py +++ b/examples/phycrnet/main.py @@ -107,14 +107,9 @@ def _transform_out(_in, _out): solver = ppsci.solver.Solver( model, constraint_pde, - cfg.output_dir, - optimizer, - scheduler, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, + optimizer=optimizer, validator=validator_pde, - eval_with_no_grad=cfg.TRAIN.eval_with_no_grad, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, + cfg=cfg, ) # train model diff --git a/examples/phygeonet/conf/heat_equation.yaml b/examples/phygeonet/conf/heat_equation.yaml index a76ba2c74b..14b003781a 100644 --- a/examples/phygeonet/conf/heat_equation.yaml +++ b/examples/phygeonet/conf/heat_equation.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/phygeonet/conf/heat_equation_with_bc.yaml b/examples/phygeonet/conf/heat_equation_with_bc.yaml index 48b8032f57..2dd9f6b6bc 100644 --- a/examples/phygeonet/conf/heat_equation_with_bc.yaml +++ b/examples/phygeonet/conf/heat_equation_with_bc.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/phygeonet/heat_equation.py b/examples/phygeonet/heat_equation.py index 17a1cb4fb5..d953e2a34a 100644 --- a/examples/phygeonet/heat_equation.py +++ b/examples/phygeonet/heat_equation.py @@ -82,10 +82,8 @@ def _transform_out( solver = ppsci.solver.Solver( model, sup_constraint, - cfg.output_dir, - optimizer, - epochs=cfg.epochs, - iters_per_epoch=iters_per_epoch, + optimizer=optimizer, + cfg=cfg, ) solver.train() solver.plot_loss_history() @@ -102,7 +100,7 @@ def evaluate(cfg: DictConfig): model = ppsci.arch.USCNN(**cfg.MODEL) solver = ppsci.solver.Solver( model, - pretrained_model_path=cfg.EVAL.pretrained_model_path, ### the path of the model + cfg=cfg, ) output_v = solver.predict({"coords": paddle.to_tensor(coords)}) output_v = output_v["output_v"] diff --git a/examples/phygeonet/heat_equation_with_bc.py b/examples/phygeonet/heat_equation_with_bc.py index 152e332a7c..39a7d06d21 100644 --- a/examples/phygeonet/heat_equation_with_bc.py +++ b/examples/phygeonet/heat_equation_with_bc.py @@ -87,10 +87,8 @@ def _transform_out( solver = ppsci.solver.Solver( model, sup_constraint, - cfg.output_dir, - optimizer, - epochs=cfg.epochs, - iters_per_epoch=iters_per_epoch, + optimizer=optimizer, + cfg=cfg, ) solver.train() @@ -107,7 +105,7 @@ def evaluate(cfg: DictConfig): coords = paddle.to_tensor(data["coords"]) solver = ppsci.solver.Solver( model, - pretrained_model_path=cfg.EVAL.pretrained_model_path, ### the path of the model + cfg=cfg, ) paras = paras.reshape([paras.shape[0], 1, paras.shape[1], paras.shape[2]]) diff --git a/examples/phylstm/conf/phylstm2.yaml b/examples/phylstm/conf/phylstm2.yaml index ea4285b156..efa8bb95ca 100644 --- a/examples/phylstm/conf/phylstm2.yaml +++ b/examples/phylstm/conf/phylstm2.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time 
and override name diff --git a/examples/phylstm/conf/phylstm3.yaml b/examples/phylstm/conf/phylstm3.yaml index 2fcebebffe..0d9fcbeaba 100644 --- a/examples/phylstm/conf/phylstm3.yaml +++ b/examples/phylstm/conf/phylstm3.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/phylstm/phylstm2.py b/examples/phylstm/phylstm2.py index 8bf184292a..50f19730f2 100755 --- a/examples/phylstm/phylstm2.py +++ b/examples/phylstm/phylstm2.py @@ -166,16 +166,9 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint_pde, - cfg.output_dir, - optimizer, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - log_freq=cfg.log_freq, + optimizer=optimizer, validator=validator_pde, - checkpoint_path=cfg.TRAIN.checkpoint_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # train model @@ -294,10 +287,8 @@ def evaluate(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, validator=validator_pde, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # evaluate solver.eval() diff --git a/examples/phylstm/phylstm3.py b/examples/phylstm/phylstm3.py index c8f424168e..3274ccc112 100755 --- a/examples/phylstm/phylstm3.py +++ b/examples/phylstm/phylstm3.py @@ -171,16 +171,9 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint_pde, - cfg.output_dir, - optimizer, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - log_freq=cfg.log_freq, + optimizer=optimizer, validator=validator_pde, - checkpoint_path=cfg.TRAIN.checkpoint_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # train model @@ -307,10 +300,8 @@ def evaluate(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, validator=validator_pde, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # evaluate diff --git a/examples/pipe/conf/poiseuille_flow.yaml b/examples/pipe/conf/poiseuille_flow.yaml index f00b94255a..fff0b9d150 100644 --- a/examples/pipe/conf/poiseuille_flow.yaml +++ b/examples/pipe/conf/poiseuille_flow.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/pipe/poiseuille_flow.py b/examples/pipe/poiseuille_flow.py index 54d20e12dd..f87855d8cb 100644 --- a/examples/pipe/poiseuille_flow.py +++ b/examples/pipe/poiseuille_flow.py @@ -140,13 +140,9 @@ def output_trans_p(input, out): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - epochs=cfg.TRAIN.epochs, - iters_per_epoch=ITERS_PER_EPOCH, - eval_during_train=cfg.TRAIN.eval_during_train, - save_freq=cfg.TRAIN.save_freq, + optimizer=optimizer, equation=equation, + cfg=cfg, ) solver.train() @@ -325,10 +321,8 @@ def forward(self, output_dict, label_dict): # initialize solver solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - 
eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) solver.eval() diff --git a/examples/rossler/conf/enn.yaml b/examples/rossler/conf/enn.yaml index 1bad85dfd6..76a91f5824 100644 --- a/examples/rossler/conf/enn.yaml +++ b/examples/rossler/conf/enn.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/rossler/conf/transformer.yaml b/examples/rossler/conf/transformer.yaml index a8c8682c00..ce449300e7 100644 --- a/examples/rossler/conf/transformer.yaml +++ b/examples/rossler/conf/transformer.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/rossler/train_enn.py b/examples/rossler/train_enn.py index b2ba6ed566..dd60cd380b 100644 --- a/examples/rossler/train_enn.py +++ b/examples/rossler/train_enn.py @@ -131,13 +131,9 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - ITERS_PER_EPOCH, - eval_during_train=True, + optimizer=optimizer, validator=validator, + cfg=cfg, ) # train model solver.train() @@ -218,9 +214,8 @@ def evaluate(cfg: DictConfig): validator = {mse_validator.name: mse_validator} solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) solver.eval() diff --git a/examples/rossler/train_transformer.py b/examples/rossler/train_transformer.py index 26453e4a32..8925a74cef 100644 --- a/examples/rossler/train_transformer.py +++ b/examples/rossler/train_transformer.py @@ -142,15 +142,10 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - ITERS_PER_EPOCH, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, + optimizer=optimizer, validator=validator, visualizer=visualizer, + cfg=cfg, ) # train model solver.train() @@ -212,10 +207,9 @@ def evaluate(cfg: DictConfig): solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, validator=validator, visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) solver.eval() # visualize prediction for pretrained model(optional) @@ -236,7 +230,7 @@ def export(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - pretrained_model_path=cfg.INFER.pretrained_model_path, + cfg=cfg, ) # export model from paddle.static import InputSpec diff --git a/examples/shock_wave/conf/shock_wave_Ma0.728.yaml b/examples/shock_wave/conf/shock_wave_Ma0.728.yaml index f24273197f..665a088225 100644 --- a/examples/shock_wave/conf/shock_wave_Ma0.728.yaml +++ b/examples/shock_wave/conf/shock_wave_Ma0.728.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/shock_wave/conf/shock_wave_Ma2.0.yaml b/examples/shock_wave/conf/shock_wave_Ma2.0.yaml index 5a785a2b62..b5bf92237c 100644 
--- a/examples/shock_wave/conf/shock_wave_Ma2.0.yaml +++ b/examples/shock_wave/conf/shock_wave_Ma2.0.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/shock_wave/shock_wave.py b/examples/shock_wave/shock_wave.py index 13b4920d46..36c1657c06 100644 --- a/examples/shock_wave/shock_wave.py +++ b/examples/shock_wave/shock_wave.py @@ -396,17 +396,9 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - log_freq=cfg.log_freq, + optimizer=optimizer, equation=equation, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, - checkpoint_path=cfg.TRAIN.checkpoint_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + cfg=cfg, ) # HACK: Give entire solver to equation object for tracking run-time epoch # to compute factor `relu` dynamically. @@ -424,9 +416,7 @@ def evaluate(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) # visualize prediction diff --git a/examples/tempoGAN/conf/tempogan.yaml b/examples/tempoGAN/conf/tempogan.yaml index c415f9a727..ea51d85146 100644 --- a/examples/tempoGAN/conf/tempogan.yaml +++ b/examples/tempoGAN/conf/tempogan.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/tempoGAN/tempoGAN.py b/examples/tempoGAN/tempoGAN.py index c59d9eba72..3f5c590390 100644 --- a/examples/tempoGAN/tempoGAN.py +++ b/examples/tempoGAN/tempoGAN.py @@ -223,40 +223,22 @@ def train(cfg: DictConfig): solver_gen = ppsci.solver.Solver( model_list, constraint_gen, - cfg.output_dir, - optimizer_gen, - lr_scheduler_gen, - cfg.TRAIN.epochs_gen, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - use_amp=cfg.USE_AMP, - amp_level=cfg.TRAIN.amp_level, + optimizer=optimizer_gen, + cfg=cfg, ) if cfg.USE_SPATIALDISC: solver_disc = ppsci.solver.Solver( model_list, constraint_disc, - cfg.output_dir, - optimizer_disc, - lr_scheduler_disc, - cfg.TRAIN.epochs_disc, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - use_amp=cfg.USE_AMP, - amp_level=cfg.TRAIN.amp_level, + optimizer=optimizer_disc, + cfg=cfg, ) if cfg.USE_TEMPODISC: solver_disc_tempo = ppsci.solver.Solver( model_list, constraint_disc_tempo, - cfg.output_dir, - optimizer_disc_tempo, - lr_scheduler_disc_tempo, - cfg.TRAIN.epochs_disc_tempo, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - use_amp=cfg.USE_AMP, - amp_level=cfg.TRAIN.amp_level, + optimizer=optimizer_disc_tempo, + cfg=cfg, ) PRED_INTERVAL = 200 diff --git a/examples/topopt/topopt.py b/examples/topopt/topopt.py index ea20050f9e..9f39ba4c95 100644 --- a/examples/topopt/topopt.py +++ b/examples/topopt/topopt.py @@ -96,11 +96,9 @@ def train(cfg: DictConfig): model, constraint, OUTPUT_DIR, - optimizer, - epochs=cfg.TRAIN.epochs, + optimizer=optimizer, iters_per_epoch=ITERS_PER_EPOCH, - eval_during_train=cfg.TRAIN.eval_during_train, - seed=cfg.seed, + cfg=cfg, ) # train model diff --git
a/examples/yinglong1/conf/yinglong_12.yaml b/examples/yinglong1/conf/yinglong_12.yaml index adb7239a51..669f696f18 100644 --- a/examples/yinglong1/conf/yinglong_12.yaml +++ b/examples/yinglong1/conf/yinglong_12.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/yinglong1/conf/yinglong_24.yaml b/examples/yinglong1/conf/yinglong_24.yaml index f265a1d994..6ad0b42f76 100644 --- a/examples/yinglong1/conf/yinglong_24.yaml +++ b/examples/yinglong1/conf/yinglong_24.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/ppsci/solver/solver.py b/ppsci/solver/solver.py index 6f6e93abdf..3858a1acd2 100644 --- a/ppsci/solver/solver.py +++ b/ppsci/solver/solver.py @@ -158,12 +158,18 @@ def __init__( cfg: Optional[DictConfig] = None, ): self.cfg = cfg + if isinstance(cfg, DictConfig): + # (Recommended) Params can be passed within cfg + # rather than passed to 'Solver.__init__' one-by-one. + self._parse_params_from_cfg(cfg) + # set model self.model = model # set constraint self.constraint = constraint # set output directory - self.output_dir = output_dir + if not cfg: + self.output_dir = output_dir # set optimizer self.optimizer = optimizer @@ -192,19 +198,20 @@ def __init__( ) # set training hyper-parameter - self.epochs = epochs - self.iters_per_epoch = iters_per_epoch - # set update_freq for gradient accumulation - self.update_freq = update_freq - # set checkpoint saving frequency - self.save_freq = save_freq - # set logging frequency - self.log_freq = log_freq - - # set evaluation hyper-parameter - self.eval_during_train = eval_during_train - self.start_eval_epoch = start_eval_epoch - self.eval_freq = eval_freq + if not cfg: + self.epochs = epochs + self.iters_per_epoch = iters_per_epoch + # set update_freq for gradient accumulation + self.update_freq = update_freq + # set checkpoint saving frequency + self.save_freq = save_freq + # set logging frequency + self.log_freq = log_freq + + # set evaluation hyper-parameter + self.eval_during_train = eval_during_train + self.start_eval_epoch = start_eval_epoch + self.eval_freq = eval_freq # initialize training log(training loss, time cost, etc.)
recorder during one epoch self.train_output_info: Dict[str, misc.AverageMeter] = {} @@ -221,21 +228,17 @@ def __init__( "reader_cost": misc.AverageMeter("reader_cost", ".5f", postfix="s"), } - # fix seed for reproducibility - self.seed = seed - # set running device - if device != "cpu" and paddle.device.get_device() == "cpu": + if not cfg: + self.device = device + if self.device != "cpu" and paddle.device.get_device() == "cpu": logger.warning(f"Set device({device}) to 'cpu' for only cpu available.") - device = "cpu" - self.device = paddle.set_device(device) + self.device = "cpu" + self.device = paddle.set_device(self.device) # set equations for physics-driven or data-physics hybrid driven task, such as PINN self.equation = equation - # set geometry for generating data - self.geom = {} if geom is None else geom - # set validator self.validator = validator @@ -243,24 +246,27 @@ def __init__( self.visualizer = visualizer # set automatic mixed precision(AMP) configuration - self.use_amp = use_amp - self.amp_level = amp_level + if not cfg: + self.use_amp = use_amp + self.amp_level = amp_level self.scaler = amp.GradScaler(True) if self.use_amp else None # whether calculate metrics by each batch during evaluation, mainly for memory efficiency - self.compute_metric_by_batch = compute_metric_by_batch + if not cfg: + self.compute_metric_by_batch = compute_metric_by_batch if validator is not None: for metric in itertools.chain( *[_v.metric.values() for _v in self.validator.values()] ): - if metric.keep_batch ^ compute_metric_by_batch: + if metric.keep_batch ^ self.compute_metric_by_batch: raise ValueError( f"{misc.typename(metric)}.keep_batch should be " - f"{compute_metric_by_batch} when compute_metric_by_batch=" - f"{compute_metric_by_batch}." + f"{self.compute_metric_by_batch} when compute_metric_by_batch=" + f"{self.compute_metric_by_batch}." 
) # whether set `stop_gradient=True` for every Tensor if no differentiation involved during evaluation - self.eval_with_no_grad = eval_with_no_grad + if not cfg: + self.eval_with_no_grad = eval_with_no_grad self.rank = dist.get_rank() self.world_size = dist.get_world_size() @@ -278,19 +284,20 @@ def __init__( # set moving average model(optional) self.ema_model = None if self.cfg and any(key in self.cfg.TRAIN for key in ["ema", "swa"]): - if "ema" in self.cfg.TRAIN: - self.avg_freq = self.cfg.TRAIN.ema.avg_freq + if "ema" in self.cfg.TRAIN and cfg.TRAIN.ema.get("use_ema", False): self.ema_model = ema.ExponentialMovingAverage( self.model, self.cfg.TRAIN.ema.decay ) - elif "swa" in self.cfg.TRAIN: - self.avg_freq = self.cfg.TRAIN.swa.avg_freq + elif "swa" in self.cfg.TRAIN and cfg.TRAIN.swa.get("use_swa", False): self.ema_model = ema.StochasticWeightAverage(self.model) # load pretrained model, usually used for transfer learning - self.pretrained_model_path = pretrained_model_path - if pretrained_model_path is not None: - save_load.load_pretrain(self.model, pretrained_model_path, self.equation) + if not cfg: + self.pretrained_model_path = pretrained_model_path + if self.pretrained_model_path is not None: + save_load.load_pretrain( + self.model, self.pretrained_model_path, self.equation + ) # initialize an dict for tracking best metric during training self.best_metric = { @@ -298,14 +305,16 @@ def __init__( "epoch": 0, } # load model checkpoint, usually used for resume training - if checkpoint_path is not None: - if pretrained_model_path is not None: + if not cfg: + self.checkpoint_path = checkpoint_path + if self.checkpoint_path is not None: + if self.pretrained_model_path is not None: logger.warning( "Detected 'pretrained_model_path' is given, weights in which might be" "overridden by weights loaded from given 'checkpoint_path'." 
) loaded_metric = save_load.load_checkpoint( - checkpoint_path, + self.checkpoint_path, self.model, self.optimizer, self.scaler, @@ -366,7 +375,9 @@ def dist_wrapper(model: nn.Layer) -> paddle.DataParallel: # set VisualDL tool self.vdl_writer = None - if use_vdl: + if not cfg: + self.use_vdl = use_vdl + if self.use_vdl: with misc.RankZeroOnly(self.rank) as is_master: if is_master: self.vdl_writer = vdl.LogWriter(osp.join(output_dir, "vdl")) @@ -377,7 +388,9 @@ def dist_wrapper(model: nn.Layer) -> paddle.DataParallel: # set WandB tool self.wandb_writer = None - if use_wandb: + if not cfg: + self.use_wandb = use_wandb + if self.use_wandb: try: import wandb except ModuleNotFoundError: @@ -390,7 +403,9 @@ def dist_wrapper(model: nn.Layer) -> paddle.DataParallel: # set TensorBoardX tool self.tbd_writer = None - if use_tbd: + if not cfg: + self.use_tbd = use_tbd + if self.use_tbd: try: import tensorboardX except ModuleNotFoundError: @@ -976,50 +991,42 @@ def plot_loss_history( use_semilogy=use_semilogy, ) - # def _parse_params_from_cfg(self, cfg: DictConfig): - # """Parse hyper-parameters from DictConfig.""" - # # general paremters - # ## output directory - # self.output_dir = cfg.output_dir - # ## logging frequency - # self.log_freq = cfg.log_freq - - # # training related paramters - # self.epochs = cfg.TRAIN.epochs - # self.iters_per_epoch = cfg.TRAIN.iters_per_epoch - # ## set update_freq for gradient accumulation - # self.update_freq = cfg.TRAIN.update_freq - # ## set checkpoint saving frequency - # self.save_freq = cfg.TRAIN.save_freq - # self.eval_during_train = cfg.TRAIN.eval_during_train - # self.start_eval_epoch = cfg.TRAIN.start_eval_epoch - # self.eval_freq = cfg.TRAIN.eval_freq - - # # evaluating related paramters - # self.device = cfg.device - - # # set automatic mixed precision(AMP) configuration - # self.use_amp = cfg.use_amp - # self.amp_level = cfg.amp_level - - # # whether calculate metrics by each batch during evaluation, mainly for memory efficiency - # self.compute_metric_by_batch = compute_metric_by_batch - # # whether set `stop_gradient=True` for every Tensor if no differentiation involved during evaluation - # self.eval_with_no_grad = eval_with_no_grad - - # # set moving average model(optional) - # if self.cfg and any(key in self.cfg.TRAIN for key in ["ema", "swa"]): - # if "ema" in self.cfg.TRAIN: - # self.avg_freq = self.cfg.TRAIN.ema.avg_freq - # elif "swa" in self.cfg.TRAIN: - # self.avg_freq = self.cfg.TRAIN.swa.avg_freq - - # # load pretrained model, usually used for transfer learning - # self.pretrained_model_path = pretrained_model_path - - # # set up benchmark flag, will print memory stat if enabled - # self.benchmark_flag: bool = os.getenv("BENCHMARK_ROOT", None) is not None - - # # set up nvtx flag for nsight analysis - # self.nvtx_flag: bool = os.getenv("NVTX", None) is not None - # self.forward_helper.nvtx_flag = self.nvtx_flag + def _parse_params_from_cfg(self, cfg: DictConfig): + """ + Parse hyper-parameters from DictConfig. 
+        """ +        self.output_dir = cfg.output_dir +        self.log_freq = cfg.log_freq +        self.use_tbd = cfg.use_tbd +        self.use_vdl = cfg.use_vdl +        self.wandb_config = cfg.wandb_config +        self.use_wandb = cfg.use_wandb +        self.device = cfg.device +        self.to_static = cfg.to_static + +        self.use_amp = cfg.use_amp +        self.amp_level = cfg.amp_level + +        self.epochs = cfg.TRAIN.epochs +        self.iters_per_epoch = cfg.TRAIN.iters_per_epoch +        self.update_freq = cfg.TRAIN.update_freq +        self.save_freq = cfg.TRAIN.save_freq +        self.eval_during_train = cfg.TRAIN.eval_during_train +        self.start_eval_epoch = cfg.TRAIN.start_eval_epoch +        self.eval_freq = cfg.TRAIN.eval_freq +        self.checkpoint_path = cfg.TRAIN.checkpoint_path + +        if "ema" in cfg.TRAIN and cfg.TRAIN.ema.get("use_ema", False): +            self.avg_freq = cfg.TRAIN.ema.avg_freq +        elif "swa" in cfg.TRAIN and cfg.TRAIN.swa.get("use_swa", False): +            self.avg_freq = cfg.TRAIN.swa.avg_freq + +        self.compute_metric_by_batch = cfg.EVAL.compute_metric_by_batch +        self.eval_with_no_grad = cfg.EVAL.eval_with_no_grad + +        if cfg.mode == "train": +            self.pretrained_model_path = cfg.TRAIN.pretrained_model_path +        elif cfg.mode == "eval": +            self.pretrained_model_path = cfg.EVAL.pretrained_model_path +        elif cfg.mode == "infer": +            self.pretrained_model_path = cfg.INFER.pretrained_model_path diff --git a/ppsci/utils/config.py b/ppsci/utils/config.py index 40b48d0fd4..1af88c889e 100644 --- a/ppsci/utils/config.py +++ b/ppsci/utils/config.py @@ -32,12 +32,67 @@ __all__ = ["get_config", "replace_shape_with_inputspec_", "AttrDict"] if importlib.util.find_spec("pydantic") is not None: +    from hydra.core.config_store import ConfigStore +    from omegaconf import OmegaConf     from pydantic import BaseModel     from pydantic import field_validator +    from pydantic import model_validator     from pydantic_core.core_schema import ValidationInfo     __all__.append("SolverConfig") +    class EMAConfig(BaseModel): +        use_ema: bool = False +        decay: float = 0.9 +        avg_freq: int = 1 + +        @field_validator("decay") +        def decay_check(cls, v): +            if v <= 0 or v >= 1: +                raise ValueError( +                    f"'decay' should be in (0, 1) when is type of float, but got {v}" +                ) +            return v + +        @field_validator("avg_freq") +        def avg_freq_check(cls, v): +            if v <= 0: +                raise ValueError( +                    "'avg_freq' should be a positive integer when is type of int, " +                    f"but got {v}" +                ) +            return v + +    class SWAConfig(BaseModel): +        use_swa: bool = False +        avg_freq: int = 1 +        avg_range: Optional[Tuple[int, int]] = None + +        @field_validator("avg_range") +        def avg_range_check(cls, v, info: ValidationInfo): +            if isinstance(v, tuple) and v[0] > v[1]: +                raise ValueError(f"'avg_range' should be a valid range, but got {v}.") +            if isinstance(v, tuple) and v[0] < 0: +                raise ValueError( +                    "The start epoch of 'avg_range' should be a non-negative integer" +                    f", but got {v[0]}." +                ) +            if isinstance(v, tuple) and v[1] > info.data["epochs"]: +                raise ValueError( +                    "The end epoch of 'avg_range' should not be larger than " +                    f"'epochs'({info.data['epochs']}), but got {v[1]}." +                ) +            return v + +        @field_validator("avg_freq") +        def avg_freq_check(cls, v): +            if v <= 0: +                raise ValueError( +                    "'avg_freq' should be a positive integer when is type of int, " +                    f"but got {v}" +                ) +            return v +     class TrainConfig(BaseModel):         """         Schema of training config for pydantic validation.
@@ -55,58 +110,6 @@ class TrainConfig(BaseModel): ema: Optional[EMAConfig] = None swa: Optional[SWAConfig] = None - class EMAConfig(BaseModel): - decay: float = 0.9 - avg_freq: int = 1 - - @field_validator("decay") - def decay_check(cls, v): - if v <= 0 or v >= 1: - raise ValueError( - f"'decay' should be in (0, 1) when is type of float, but got {v}" - ) - return v - - @field_validator("avg_freq") - def avg_freq_check(cls, v): - if v <= 0: - raise ValueError( - "'avg_freq' should be a positive integer when is type of int, " - f"but got {v}" - ) - return v - - class SWAConfig(BaseModel): - avg_freq: int = 1 - avg_range: Optional[Tuple[int, int]] = None - - @field_validator("avg_range") - def avg_range_check(cls, v, info: ValidationInfo): - if v[0] > v[1]: - raise ValueError( - f"'avg_range' should be a valid range, but got {v}." - ) - if v[0] < 0: - raise ValueError( - "The start epoch of 'avg_range' should be a non-negtive integer" - f" , but got {v[0]}." - ) - if v[1] > info.data["epochs"]: - raise ValueError( - "The end epoch of 'avg_range' should not be lager than " - f"'epochs'({info.data['epochs']}), but got {v[1]}." - ) - return v - - @field_validator("avg_freq") - def avg_freq_check(cls, v): - if v <= 0: - raise ValueError( - "'avg_freq' should be a positive integer when is type of int, " - f"but got {v}" - ) - return v - # Fine-grained validator(s) below @field_validator("epochs") def epochs_check(cls, v): @@ -164,21 +167,14 @@ def eval_freq_check(cls, v, info: ValidationInfo): ) return v - @field_validator("ema") - def ema_check(cls, v, info: ValidationInfo): - if "swa" in info.data and info.data["swa"] is not None: + @model_validator(mode="after") + def ema_swa_checker(self): + if (self.ema and self.swa) and (self.ema.use_ema and self.swa.use_swa): raise ValueError( - "The config of 'swa' should not be used when 'ema' is specifed." + "Cannot enable both EMA and SWA at the same time, " + "please disable at least one of them." ) - return v - - @field_validator("swa") - def swa_check(cls, v, info: ValidationInfo): - if "ema" in info.data and info.data["ema"] is not None: - raise ValueError( - "The config of 'ema' should not be used when 'swa' is specifed." 
- ) - return v + return self class EvalConfig(BaseModel): """ @@ -195,7 +191,7 @@ class InferConfig(BaseModel): """ pretrained_model_path: Optional[str] = None - export_path: str + export_path: str = "./inference" pdmodel_path: Optional[str] = None pdpiparams_path: Optional[str] = None onnx_path: Optional[str] = None @@ -284,8 +280,9 @@ class SolverConfig(BaseModel): log_freq: int = 20 seed: int = 42 use_vdl: bool = False - use_wandb: bool = False + use_tbd: bool = False wandb_config: Optional[Mapping] = None + use_wandb: bool = False device: Literal["cpu", "gpu", "xpu"] = "gpu" use_amp: bool = False amp_level: Literal["O0", "O1", "O2", "OD"] = "O1" @@ -320,13 +317,61 @@ def seed_check(cls, v): @field_validator("use_wandb") def use_wandb_check(cls, v, info: ValidationInfo): - if not isinstance(info.data["wandb_config"], dict): + if v and not isinstance(info.data["wandb_config"], dict): raise ValueError( "'wandb_config' should be a dict when 'use_wandb' is True, " f"but got {misc.typename(info.data['wandb_config'])}" ) return v + # store SolverConfig as 'ppsci_default' so as to be used as default config in *.yaml + """ + #### xxx.yaml #### + defaults: + - ppsci_default <-- 'ppsci_default' used here + - TRAIN: train_default <-- 'train_default' used here + - TRAIN/ema: ema_default <-- 'ema_default' used here + - TRAIN/swa: swa_default <-- 'swa_default' used here + - EVAL: eval_default <-- 'eval_default' used here + - INFER: infer_default <-- 'infer_default' used here + - _self_ + mode: train + seed: 42 + ... + ... + ################## + """ + + global_default_cfg = SolverConfig().model_dump() + omegaconf_dict_config = OmegaConf.create(global_default_cfg) + cs = ConfigStore.instance() + cs.store(name="ppsci_default", node=omegaconf_dict_config) + + train_default_cfg = TrainConfig().model_dump() + train_omegaconf_dict_config = OmegaConf.create(train_default_cfg) + cs = ConfigStore.instance() + cs.store(group="TRAIN", name="train_default", node=train_omegaconf_dict_config) + + ema_default_cfg = EMAConfig().model_dump() + ema_omegaconf_dict_config = OmegaConf.create(ema_default_cfg) + cs = ConfigStore.instance() + cs.store(group="TRAIN/ema", name="ema_default", node=ema_omegaconf_dict_config) + + swa_default_cfg = SWAConfig().model_dump() + swa_omegaconf_dict_config = OmegaConf.create(swa_default_cfg) + cs = ConfigStore.instance() + cs.store(group="TRAIN/swa", name="swa_default", node=swa_omegaconf_dict_config) + + eval_default_cfg = EvalConfig().model_dump() + eval_omegaconf_dict_config = OmegaConf.create(eval_default_cfg) + cs = ConfigStore.instance() + cs.store(group="EVAL", name="eval_default", node=eval_omegaconf_dict_config) + + infer_default_cfg = InferConfig().model_dump() + infer_omegaconf_dict_config = OmegaConf.create(infer_default_cfg) + cs = ConfigStore.instance() + cs.store(group="INFER", name="infer_default", node=infer_omegaconf_dict_config) + class AttrDict(dict): def __getattr__(self, key): From b8e39c9562f57680b54247c43b90465495ee611e Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Tue, 30 Apr 2024 03:58:01 +0000 Subject: [PATCH 04/20] remove deprecated class and function in config.py --- ppsci/arch/__init__.py | 2 +- ppsci/constraint/__init__.py | 2 +- ppsci/data/dataset/__init__.py | 2 +- ppsci/equation/__init__.py | 2 +- ppsci/geometry/__init__.py | 2 +- ppsci/loss/__init__.py | 2 +- ppsci/loss/mtl/__init__.py | 2 +- ppsci/metric/__init__.py | 2 +- ppsci/optimizer/__init__.py | 4 +- ppsci/utils/__init__.py | 2 - ppsci/utils/config.py | 208 
++------------------------------- ppsci/validate/__init__.py | 2 +- ppsci/visualize/__init__.py | 2 +- 13 files changed, 20 insertions(+), 214 deletions(-) diff --git a/ppsci/arch/__init__.py b/ppsci/arch/__init__.py index c959c2c079..956660d1a3 100644 --- a/ppsci/arch/__init__.py +++ b/ppsci/arch/__init__.py @@ -79,7 +79,7 @@ def build_model(cfg): """Build model Args: - cfg (AttrDict): Arch config. + cfg (DictConfig): Arch config. Returns: nn.Layer: Model. diff --git a/ppsci/constraint/__init__.py b/ppsci/constraint/__init__.py index 6cbe1a42b0..9179439436 100644 --- a/ppsci/constraint/__init__.py +++ b/ppsci/constraint/__init__.py @@ -42,7 +42,7 @@ def build_constraint(cfg, equation_dict, geom_dict): """Build constraint(s). Args: - cfg (List[AttrDict]): Constraint config list. + cfg (List[DictConfig]): Constraint config list. equation_dict (Dct[str, Equation]): Equation(s) in dict. geom_dict (Dct[str, Geometry]): Geometry(ies) in dict. diff --git a/ppsci/data/dataset/__init__.py b/ppsci/data/dataset/__init__.py index 9979ac8013..9ea66798ca 100644 --- a/ppsci/data/dataset/__init__.py +++ b/ppsci/data/dataset/__init__.py @@ -74,7 +74,7 @@ def build_dataset(cfg) -> "io.Dataset": """Build dataset Args: - cfg (List[AttrDict]): dataset config list. + cfg (List[DictConfig]): dataset config list. Returns: Dict[str, io.Dataset]: dataset. diff --git a/ppsci/equation/__init__.py b/ppsci/equation/__init__.py index 77a9b20860..2b97d378b7 100644 --- a/ppsci/equation/__init__.py +++ b/ppsci/equation/__init__.py @@ -54,7 +54,7 @@ def build_equation(cfg): """Build equation(s) Args: - cfg (List[AttrDict]): Equation(s) config list. + cfg (List[DictConfig]): Equation(s) config list. Returns: Dict[str, Equation]: Equation(s) in dict. diff --git a/ppsci/geometry/__init__.py b/ppsci/geometry/__init__.py index 4f1ff0b122..768ed0581d 100644 --- a/ppsci/geometry/__init__.py +++ b/ppsci/geometry/__init__.py @@ -54,7 +54,7 @@ def build_geometry(cfg): """Build geometry(ies) Args: - cfg (List[AttrDict]): Geometry config list. + cfg (List[DictConfig]): Geometry config list. Returns: Dict[str, Geometry]: Geometry(ies) in dict. diff --git a/ppsci/loss/__init__.py b/ppsci/loss/__init__.py index 26332da8be..ea3f05aa76 100644 --- a/ppsci/loss/__init__.py +++ b/ppsci/loss/__init__.py @@ -51,7 +51,7 @@ def build_loss(cfg): """Build loss. Args: - cfg (AttrDict): Loss config. + cfg (DictConfig): Loss config. Returns: Loss: Callable loss object. """ diff --git a/ppsci/loss/mtl/__init__.py b/ppsci/loss/mtl/__init__.py index 8dada46fac..f0e218d98e 100644 --- a/ppsci/loss/mtl/__init__.py +++ b/ppsci/loss/mtl/__init__.py @@ -33,7 +33,7 @@ def build_mtl_aggregator(cfg): """Build loss aggregator with multi-task learning method. Args: - cfg (AttrDict): Aggregator config. + cfg (DictConfig): Aggregator config. Returns: Loss: Callable loss aggregator object. """ diff --git a/ppsci/metric/__init__.py b/ppsci/metric/__init__.py index 5390db4c4e..6059b22116 100644 --- a/ppsci/metric/__init__.py +++ b/ppsci/metric/__init__.py @@ -43,7 +43,7 @@ def build_metric(cfg): """Build metric. Args: - cfg (List[AttrDict]): List of metric config. + cfg (List[DictConfig]): List of metric config. Returns: Dict[str, Metric]: Dict of callable metric object. diff --git a/ppsci/optimizer/__init__.py b/ppsci/optimizer/__init__.py index c973b489fb..7dcf33b40b 100644 --- a/ppsci/optimizer/__init__.py +++ b/ppsci/optimizer/__init__.py @@ -39,7 +39,7 @@ def build_lr_scheduler(cfg, epochs, iters_per_epoch): """Build learning rate scheduler. 
Args: - cfg (AttrDict): Learning rate scheduler config. + cfg (DictConfig): Learning rate scheduler config. epochs (int): Total epochs. iters_per_epoch (int): Number of iterations of one epoch. @@ -57,7 +57,7 @@ def build_optimizer(cfg, model_list, epochs, iters_per_epoch): """Build optimizer and learning rate scheduler Args: - cfg (AttrDict): Learning rate scheduler config. + cfg (DictConfig): Learning rate scheduler config. model_list (Tuple[nn.Layer, ...]): Tuple of model(s). epochs (int): Total epochs. iters_per_epoch (int): Number of iterations of one epoch. diff --git a/ppsci/utils/__init__.py b/ppsci/utils/__init__.py index 5b076fb3bb..0c30d36d95 100644 --- a/ppsci/utils/__init__.py +++ b/ppsci/utils/__init__.py @@ -22,7 +22,6 @@ from ppsci.utils.checker import dynamic_import_to_globals from ppsci.utils.checker import run_check from ppsci.utils.checker import run_check_mesh -from ppsci.utils.config import AttrDict from ppsci.utils.expression import ExpressionSolver from ppsci.utils.misc import AverageMeter from ppsci.utils.misc import set_random_seed @@ -39,7 +38,6 @@ from ppsci.utils.writer import save_tecplot_file __all__ = [ - "AttrDict", "AverageMeter", "ExpressionSolver", "initializer", diff --git a/ppsci/utils/config.py b/ppsci/utils/config.py index 1af88c889e..16e839f3f6 100644 --- a/ppsci/utils/config.py +++ b/ppsci/utils/config.py @@ -14,22 +14,16 @@ from __future__ import annotations -import argparse -import copy import importlib.util -import os from typing import Mapping from typing import Optional from typing import Tuple -import yaml -from paddle import static from typing_extensions import Literal -from ppsci.utils import logger from ppsci.utils import misc -__all__ = ["get_config", "replace_shape_with_inputspec_", "AttrDict"] +__all__ = [] if importlib.util.find_spec("pydantic") is not None: from hydra.core.config_store import ConfigStore @@ -324,16 +318,16 @@ def use_wandb_check(cls, v, info: ValidationInfo): ) return v - # store SolverConfig as 'ppsci_default' so as to be used as default config in *.yaml + # Register 'XXXConfig' as default node, so as to be used as default config in *.yaml """ #### xxx.yaml #### defaults: - - ppsci_default <-- 'ppsci_default' used here - - TRAIN: train_default <-- 'train_default' used here - - TRAIN/ema: ema_default <-- 'ema_default' used here - - TRAIN/swa: swa_default <-- 'swa_default' used here - - EVAL: eval_default <-- 'eval_default' used here - - INFER: infer_default <-- 'infer_default' used here + - ppsci_default <-- 'ppsci_default' used here + - TRAIN: train_default <-- 'train_default' used here + - TRAIN/ema: ema_default <-- 'ema_default' used here + - TRAIN/swa: swa_default <-- 'swa_default' used here + - EVAL: eval_default <-- 'eval_default' used here + - INFER: infer_default <-- 'infer_default' used here - _self_ mode: train seed: 42 @@ -371,189 +365,3 @@ def use_wandb_check(cls, v, info: ValidationInfo): infer_omegaconf_dict_config = OmegaConf.create(infer_default_cfg) cs = ConfigStore.instance() cs.store(group="INFER", name="infer_default", node=infer_omegaconf_dict_config) - - -class AttrDict(dict): - def __getattr__(self, key): - return self[key] - - def __setattr__(self, key, value): - if key in self.__dict__: - self.__dict__[key] = value - else: - self[key] = value - - def __deepcopy__(self, content): - return AttrDict(copy.deepcopy(dict(self))) - - -def create_attr_dict(yaml_config): - from ast import literal_eval - - for key, value in yaml_config.items(): - if isinstance(value, dict): - yaml_config[key] = value = 
AttrDict(value) - if isinstance(value, str): - try: - value = literal_eval(value) - except BaseException: - pass - if isinstance(value, AttrDict): - create_attr_dict(yaml_config[key]) - else: - yaml_config[key] = value - - -def parse_config(cfg_file): - """Load a config file into AttrDict""" - with open(cfg_file, "r") as fopen: - yaml_config = AttrDict(yaml.load(fopen, Loader=yaml.SafeLoader)) - create_attr_dict(yaml_config) - return yaml_config - - -def print_dict(d, delimiter=0): - """ - Recursively visualize a dict and - indenting according by the relationship of keys. - """ - placeholder = "-" * 60 - for k, v in d.items(): - if isinstance(v, dict): - logger.info(f"{delimiter * ' '}{k} : ") - print_dict(v, delimiter + 4) - elif isinstance(v, list) and len(v) >= 1 and isinstance(v[0], dict): - logger.info(f"{delimiter * ' '}{k} : ") - for value in v: - print_dict(value, delimiter + 2) - else: - logger.info(f"{delimiter * ' '}{k} : {v}") - - if k[0].isupper() and delimiter == 0: - logger.info(placeholder) - - -def print_config(config): - """ - Visualize configs - Arguments: - config: configs - """ - logger.advertise() - print_dict(config) - - -def override(dl, ks, v): - """ - Recursively replace dict of list - Args: - dl(dict or list): dict or list to be replaced - ks(list): list of keys - v(str): value to be replaced - """ - - def str2num(v): - try: - return eval(v) - except Exception: - return v - - if not isinstance(dl, (list, dict)): - raise ValueError(f"{dl} should be a list or a dict") - if len(ks) <= 0: - raise ValueError("length of keys should be larger than 0") - - if isinstance(dl, list): - k = str2num(ks[0]) - if len(ks) == 1: - if k >= len(dl): - raise ValueError(f"index({k}) out of range({dl})") - dl[k] = str2num(v) - else: - override(dl[k], ks[1:], v) - else: - if len(ks) == 1: - # assert ks[0] in dl, (f"{ks[0]} is not exist in {dl}") - if ks[0] not in dl: - print(f"A new field ({ks[0]}) detected!") - dl[ks[0]] = str2num(v) - else: - if ks[0] not in dl.keys(): - dl[ks[0]] = {} - print(f"A new Series field ({ks[0]}) detected!") - override(dl[ks[0]], ks[1:], v) - - -def override_config(config, options=None): - """ - Recursively override the config - Args: - config(dict): dict to be replaced - options(list): list of pairs(key0.key1.idx.key2=value) - such as: [ - "topk=2", - "VALID.transforms.1.ResizeImage.resize_short=300" - ] - Returns: - config(dict): replaced config - """ - if options is not None: - for opt in options: - assert isinstance(opt, str), f"option({opt}) should be a str" - assert ( - "=" in opt - ), f"option({opt}) should contain a = to distinguish between key and value" - pair = opt.split("=") - assert len(pair) == 2, "there can be only a = in the option" - key, value = pair - keys = key.split(".") - override(config, keys, value) - return config - - -def get_config(fname, overrides=None, show=False): - """ - Read config from file - """ - if not os.path.exists(fname): - raise FileNotFoundError(f"config file({fname}) is not exist") - config = parse_config(fname) - override_config(config, overrides) - if show: - print_config(config) - return config - - -def parse_args(): - parser = argparse.ArgumentParser("paddlescience running script") - parser.add_argument("-e", "--epochs", type=int, help="training epochs") - parser.add_argument("-o", "--output_dir", type=str, help="output directory") - parser.add_argument( - "--to_static", - action="store_true", - help="whether enable to_static for forward computation", - ) - - args = parser.parse_args() - return args - - -def 
_is_num_seq(seq): - # whether seq is all int number(it is a shape) - return isinstance(seq, (list, tuple)) and all(isinstance(x, int) for x in seq) - - -def replace_shape_with_inputspec_(node: AttrDict): - if _is_num_seq(node): - return True - - if isinstance(node, dict): - for key in node: - if replace_shape_with_inputspec_(node[key]): - node[key] = static.InputSpec(node[key]) - elif isinstance(node, list): - for i in range(len(node)): - if replace_shape_with_inputspec_(node[i]): - node[i] = static.InputSpec(node[i]) - - return False diff --git a/ppsci/validate/__init__.py b/ppsci/validate/__init__.py index 9e05b13665..3bc1c9ae4d 100644 --- a/ppsci/validate/__init__.py +++ b/ppsci/validate/__init__.py @@ -33,7 +33,7 @@ def build_validator(cfg, equation_dict, geom_dict): """Build validator(s). Args: - cfg (List[AttrDict]): Validator(s) config list. + cfg (List[DictConfig]): Validator(s) config list. geom_dict (Dct[str, Geometry]): Geometry(ies) in dict. equation_dict (Dct[str, Equation]): Equation(s) in dict. diff --git a/ppsci/visualize/__init__.py b/ppsci/visualize/__init__.py index 7beea234c5..73cd0e0953 100644 --- a/ppsci/visualize/__init__.py +++ b/ppsci/visualize/__init__.py @@ -55,7 +55,7 @@ def build_visualizer(cfg): """Build visualizer(s). Args: - cfg (List[AttrDict]): Visualizer(s) config list. + cfg (List[DictConfig]): Visualizer(s) config list. geom_dict (Dct[str, Geometry]): Geometry(ies) in dict. equation_dict (Dct[str, Equation]): Equation(s) in dict. From 64eedf7b6654b2ab1712d4b5d5620ca6cc7ff583 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Tue, 30 Apr 2024 04:06:21 +0000 Subject: [PATCH 05/20] update docstring of callbacks.py --- ppsci/utils/callbacks.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/ppsci/utils/callbacks.py b/ppsci/utils/callbacks.py index e55a29130f..bcfbbd46bd 100644 --- a/ppsci/utils/callbacks.py +++ b/ppsci/utils/callbacks.py @@ -31,9 +31,10 @@ class InitCallback(Callback): """Callback class for: - 1. Parse config dict from given yaml file and check its validity, complete missing items by its' default values. + 1. Parse config dict from given yaml file and check its validity. 2. Fixing random seed to 'config.seed'. 3. Initialize logger while creating output directory(if not exist). + 4. Enable prim mode if specified. NOTE: This callback is mainly for reducing unnecessary duplicate code in each examples code when runing with hydra. @@ -60,8 +61,6 @@ class InitCallback(Callback): """ def on_job_start(self, config: DictConfig, **kwargs: Any) -> None: - # check given cfg using pre-defined pydantic schema in 'SolverConfig', error(s) will be raised - # if any checking failed at this step if importlib.util.find_spec("pydantic") is not None: from pydantic import ValidationError else: @@ -76,8 +75,6 @@ def on_job_start(self, config: DictConfig, **kwargs: Any) -> None: # error(s) will be printed and exit program if any checking failed at this step try: _model_pydantic = config_module.SolverConfig(**dict(config)) - # complete missing items with default values pre-defined in pydantic schema in - # 'SolverConfig' full_cfg = DictConfig(_model_pydantic.model_dump()) except ValidationError as e: print(e) @@ -100,7 +97,7 @@ def on_job_start(self, config: DictConfig, **kwargs: Any) -> None: # enable prim if specified if "prim" in full_cfg and bool(full_cfg.prim): - # Mostly for dy2st running, will be removed in the future + # Mostly for compiler running with dy2st. 
from paddle.framework import core core.set_prim_eager_enabled(True) From 9529ba44aab923b1123212c71a461e4da0aac879 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Tue, 30 Apr 2024 06:58:35 +0000 Subject: [PATCH 06/20] update code --- examples/NLS-MB/NLS-MB_optical_rogue_wave.py | 1 - examples/ldc/conf/ldc2d_modulus.yaml | 69 ------------------- .../ldc2d_modulus_importance_sampling.yaml | 69 ------------------- examples/lorenz/train_enn.py | 19 ++++- examples/lorenz/train_transformer.py | 16 ++++- examples/nsfnet/conf/VP_NSFNet4.yaml | 30 ++++---- examples/pipe/poiseuille_flow.py | 1 + examples/tempoGAN/conf/tempogan.yaml | 4 +- examples/tempoGAN/tempoGAN.py | 6 +- 9 files changed, 52 insertions(+), 163 deletions(-) delete mode 100644 examples/ldc/conf/ldc2d_modulus.yaml delete mode 100644 examples/ldc/conf/ldc2d_modulus_importance_sampling.yaml diff --git a/examples/NLS-MB/NLS-MB_optical_rogue_wave.py b/examples/NLS-MB/NLS-MB_optical_rogue_wave.py index 1e486fa7cb..7abe376437 100644 --- a/examples/NLS-MB/NLS-MB_optical_rogue_wave.py +++ b/examples/NLS-MB/NLS-MB_optical_rogue_wave.py @@ -258,7 +258,6 @@ def train(cfg: DictConfig): epochs=EPOCHS, equation=equation, validator=validator, - cfg=cfg, ) # train model solver.train() diff --git a/examples/ldc/conf/ldc2d_modulus.yaml b/examples/ldc/conf/ldc2d_modulus.yaml deleted file mode 100644 index 83b06f9887..0000000000 --- a/examples/ldc/conf/ldc2d_modulus.yaml +++ /dev/null @@ -1,69 +0,0 @@ -hydra: - run: - # dynamic output directory according to running time and override name - dir: outputs_modulus/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} - job: - name: ${mode} # name of logfile - chdir: false # keep current working direcotry unchaned - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq - callbacks: - init_callback: - _target_: ppsci.utils.callbacks.InitCallback - sweep: - # output directory for multirun - dir: ${hydra.run.dir} - subdir: ./ - -# general settings -mode: train # running mode: train/eval -seed: 2023 -output_dir: ${hydra:run.dir} -log_freq: 20 - -# set working condition -NU: 0.01 -RHO: 1.0 - -# model settings -MODEL: - input_keys: ["x", "y"] - output_keys: ["u", "v", "p"] - num_layers: 6 - hidden_size: 512 - activation: "silu" - weight_norm: true - -# training settings -TRAIN: - epochs: 10 - iters_per_epoch: 1000 - save_freq: 0 - eval_during_train: true - eval_freq: 1 - lr_scheduler: - epochs: ${TRAIN.epochs} - iters_per_epoch: ${TRAIN.iters_per_epoch} - learning_rate: 0.001 - gamma: 0.95 - decay_steps: 4000 - by_epoch: false - batch_size: - bc_top: 1000 - bc_noslip: 1000 - pde: 4000 - pretrained_model_path: null - checkpoint_path: null - -# evaluation settings -EVAL: - pretrained_model_path: null - eval_with_no_grad: true - batch_size: 1024 diff --git a/examples/ldc/conf/ldc2d_modulus_importance_sampling.yaml b/examples/ldc/conf/ldc2d_modulus_importance_sampling.yaml deleted file mode 100644 index 9586ce7953..0000000000 --- a/examples/ldc/conf/ldc2d_modulus_importance_sampling.yaml +++ /dev/null @@ -1,69 +0,0 @@ -hydra: - run: - # dynamic output directory according to running time and override name - dir: output_ldc2d_importance_sampling/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} - job: - name: ${mode} # name of logfile - chdir: false # keep current working direcotry unchaned - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - 
- TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq - callbacks: - init_callback: - _target_: ppsci.utils.callbacks.InitCallback - sweep: - # output directory for multirun - dir: ${hydra.run.dir} - subdir: ./ - -# general settings -mode: train # running mode: train/eval -seed: 2023 -output_dir: ${hydra:run.dir} -log_freq: 20 - -# set working condition -NU: 0.01 -RHO: 1.0 - -# model settings -MODEL: - input_keys: ["x", "y"] - output_keys: ["u", "v", "p"] - num_layers: 6 - hidden_size: 512 - activation: "silu" - weight_norm: true - -# training settings -TRAIN: - epochs: 10 - iters_per_epoch: 1000 - save_freq: 0 - eval_during_train: true - eval_freq: 1 - lr_scheduler: - epochs: ${TRAIN.epochs} - iters_per_epoch: ${TRAIN.iters_per_epoch} - learning_rate: 0.001 - gamma: 0.95 - decay_steps: 4000 - by_epoch: false - batch_size: - bc_top: 1000 - bc_noslip: 1000 - pde: 4000 - pretrained_model_path: null - checkpoint_path: null - -# evaluation settings -EVAL: - pretrained_model_path: null - eval_with_no_grad: true - batch_size: 1024 diff --git a/examples/lorenz/train_enn.py b/examples/lorenz/train_enn.py index 91f988425e..0a31c2fcd3 100644 --- a/examples/lorenz/train_enn.py +++ b/examples/lorenz/train_enn.py @@ -18,6 +18,7 @@ # This file is for step1: training a embedding model. # This file is based on PaddleScience/ppsci API. +from os import path as osp import hydra import numpy as np @@ -25,6 +26,7 @@ from omegaconf import DictConfig import ppsci +from ppsci.utils import logger def get_mean_std(data: np.ndarray): @@ -38,6 +40,11 @@ def get_mean_std(data: np.ndarray): def train(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + weights = (1.0 * (cfg.TRAIN_BLOCK_SIZE - 1), 1.0e4 * cfg.TRAIN_BLOCK_SIZE) regularization_key = "k_matrix" # manually build constraint(s) @@ -130,9 +137,12 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - optimizer=optimizer, + cfg.output_dir, + optimizer, + epochs=cfg.TRAIN.epochs, + iters_per_epoch=ITERS_PER_EPOCH, + eval_during_train=True, validator=validator, - cfg=cfg, ) # train model solver.train() @@ -141,6 +151,11 @@ def train(cfg: DictConfig): def evaluate(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + weights = (1.0 * (cfg.TRAIN_BLOCK_SIZE - 1), 1.0e4 * cfg.TRAIN_BLOCK_SIZE) regularization_key = "k_matrix" # manually build constraint(s) diff --git a/examples/lorenz/train_transformer.py b/examples/lorenz/train_transformer.py index c55cb3799a..af2ce55738 100644 --- a/examples/lorenz/train_transformer.py +++ b/examples/lorenz/train_transformer.py @@ -18,6 +18,7 @@ # This file is for step2: training a transformer model, based on frozen pretrained embedding model. # This file is based on PaddleScience/ppsci API. 
+from os import path as osp from typing import Dict import hydra @@ -26,6 +27,7 @@ import ppsci from ppsci.arch import base +from ppsci.utils import logger from ppsci.utils import save_load @@ -55,6 +57,9 @@ def train(cfg: DictConfig): # valid time-series: 64 time-steps: 1024 block-size: 256 stride: 1024 # test time-series: 256 time-steps: 1024 # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") embedding_model = build_embedding_model(cfg.EMBEDDING_MODEL_PATH) output_transform = OutputTransform(embedding_model) @@ -147,10 +152,14 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - optimizer=optimizer, + cfg.output_dir, + optimizer, + epochs=cfg.TRAIN.epochs, + iters_per_epoch=ITERS_PER_EPOCH, + eval_during_train=cfg.TRAIN.eval_during_train, + eval_freq=cfg.TRAIN.eval_freq, validator=validator, visualizer=visualizer, - cfg=cfg, ) # train model solver.train() @@ -161,6 +170,9 @@ def train(cfg: DictConfig): def evaluate(cfg: DictConfig): + # directly evaluate pretrained model(optional) + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + embedding_model = build_embedding_model(cfg.EMBEDDING_MODEL_PATH) output_transform = OutputTransform(embedding_model) diff --git a/examples/nsfnet/conf/VP_NSFNet4.yaml b/examples/nsfnet/conf/VP_NSFNet4.yaml index 6a0241b7fe..fded28c455 100644 --- a/examples/nsfnet/conf/VP_NSFNet4.yaml +++ b/examples/nsfnet/conf/VP_NSFNet4.yaml @@ -29,10 +29,19 @@ hydra: sweep: # output directory for multirun dir: ${hydra.run.dir} -seed: 1234 + +mode: train output_dir: ${hydra:run.dir} +seed: 1234 data_dir: ./data/ -log_freq: 20 +log_freq: 5000 +ntrain: 11333 +nb_train: 2952 +n0_train: 986 +alpha: 100 +beta: 100 +re: 999.35 + MODEL: input_keys: ["x", "y","z","t"] output_keys: ["u", "v", "w","p"] @@ -40,29 +49,20 @@ MODEL: hidden_size: 300 activation: "tanh" weight_norm: True -mode: train -ntrain: 11333 -nb_train: 2952 -n0_train: 986 -alpha: 100 -beta: 100 -re: 999.35 -epochs: 15250 TRAIN: - log_freq: 5000 + epochs: 15250 eval_freq: 5000 save_freq: 5000 - eval_with_no_grad: true + iters_per_epoch: 150 lr_scheduler: - epochs: 15250 + epochs: ${TRAIN.epochs} decay_epochs: [250, 4500, 5000, 5500] - iters_per_epoch: 150 values: [1e-3, 1e-4, 1e-5, 1e-6, 1e-7] + EVAL: pretrained_model_path: null eval_with_no_grad: true - INFER: pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/nsfnet/nsfnet4.pdparams export_path: ./inference/VP_NSFNet4 diff --git a/examples/pipe/poiseuille_flow.py b/examples/pipe/poiseuille_flow.py index f87855d8cb..257211ec16 100644 --- a/examples/pipe/poiseuille_flow.py +++ b/examples/pipe/poiseuille_flow.py @@ -140,6 +140,7 @@ def output_trans_p(input, out): solver = ppsci.solver.Solver( model, constraint, + iters_per_epoch=ITERS_PER_EPOCH, optimizer=optimizer, equation=equation, cfg=cfg, diff --git a/examples/tempoGAN/conf/tempogan.yaml b/examples/tempoGAN/conf/tempogan.yaml index ea51d85146..bacaf36454 100644 --- a/examples/tempoGAN/conf/tempogan.yaml +++ b/examples/tempoGAN/conf/tempogan.yaml @@ -38,9 +38,10 @@ output_dir: ${hydra:run.dir} log_freq: 20 DATASET_PATH: ./datasets/tempoGAN/2d_train.mat DATASET_PATH_VALID: ./datasets/tempoGAN/2d_valid.mat +use_amp: true +amp_level: O2 # set working condition -USE_AMP: true USE_SPATIALDISC: true USE_TEMPODISC: true WEIGHT_GEN: [5.0, 0.0, 1.0] # lambda_l1, lambda_l2, lambda_t @@ -96,7 +97,6 @@ 
TRAIN: gamma: 0.05 by_epoch: true eval_during_train: false - amp_level: O2 pretrained_model_path: null checkpoint_path: null diff --git a/examples/tempoGAN/tempoGAN.py b/examples/tempoGAN/tempoGAN.py index 3f5c590390..2591b047c5 100644 --- a/examples/tempoGAN/tempoGAN.py +++ b/examples/tempoGAN/tempoGAN.py @@ -223,21 +223,21 @@ def train(cfg: DictConfig): solver_gen = ppsci.solver.Solver( model_list, constraint_gen, - optimizer_gen, + optimizer=optimizer_gen, cfg=cfg, ) if cfg.USE_SPATIALDISC: solver_disc = ppsci.solver.Solver( model_list, constraint_disc, - optimizer_disc, + optimizer=optimizer_disc, cfg=cfg, ) if cfg.USE_TEMPODISC: solver_disc_tempo = ppsci.solver.Solver( model_list, constraint_disc_tempo, - optimizer_disc_tempo, + optimizer=optimizer_disc_tempo, cfg=cfg, ) From f2f6b81d2614e37485f512e73e5116bdfc71e156 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Thu, 9 May 2024 11:57:13 +0800 Subject: [PATCH 07/20] update code --- examples/NLS-MB/NLS-MB_optical_rogue_wave.py | 4 + examples/NLS-MB/NLS-MB_optical_soliton.py | 6 +- examples/advection/conf/adv_plain.yaml | 96 ------------------- examples/aneurysm/aneurysm_flow.py | 1 + .../convection_diffusion/conf/5_best.yaml | 90 ----------------- examples/convection_diffusion/conf/6.yaml | 89 ----------------- .../2d_unsteady/cylinder2d_unsteady_Re100.py | 13 +-- examples/fourcastnet/train_finetune.py | 1 + examples/fourcastnet/train_precip.py | 1 + examples/fourcastnet/train_pretrain.py | 1 + examples/fpde/conf/fractional_poisson_2d.yaml | 69 +++++++++++++ examples/pipe/poiseuille_flow.py | 7 +- 12 files changed, 89 insertions(+), 289 deletions(-) delete mode 100644 examples/advection/conf/adv_plain.yaml delete mode 100644 examples/convection_diffusion/conf/5_best.yaml delete mode 100644 examples/convection_diffusion/conf/6.yaml create mode 100644 examples/fpde/conf/fractional_poisson_2d.yaml diff --git a/examples/NLS-MB/NLS-MB_optical_rogue_wave.py b/examples/NLS-MB/NLS-MB_optical_rogue_wave.py index 7abe376437..4f208da5bf 100644 --- a/examples/NLS-MB/NLS-MB_optical_rogue_wave.py +++ b/examples/NLS-MB/NLS-MB_optical_rogue_wave.py @@ -256,8 +256,12 @@ def train(cfg: DictConfig): OUTPUT_DIR, optimizer=optimizer_lbfgs, epochs=EPOCHS, + iters_per_epoch=cfg.TRAIN.lbfgs.iters_per_epoch, + eval_during_train=cfg.TRAIN.lbfgs.eval_during_train, + eval_freq=cfg.TRAIN.lbfgs.eval_freq, equation=equation, validator=validator, + cfg=cfg, ) # train model solver.train() diff --git a/examples/NLS-MB/NLS-MB_optical_soliton.py b/examples/NLS-MB/NLS-MB_optical_soliton.py index 5dbf431c15..820e0452ed 100644 --- a/examples/NLS-MB/NLS-MB_optical_soliton.py +++ b/examples/NLS-MB/NLS-MB_optical_soliton.py @@ -232,8 +232,10 @@ def train(cfg: DictConfig): constraint, OUTPUT_DIR, optimizer_lbfgs, - EPOCHS, - cfg.TRAIN.lbfgs.iters_per_epoch, + epochs=EPOCHS, + iters_per_epoch=cfg.TRAIN.lbfgs.iters_per_epoch, + eval_during_train=cfg.TRAIN.lbfgs.eval_during_train, + eval_freq=cfg.TRAIN.lbfgs.eval_freq, equation=equation, validator=validator, cfg=cfg, diff --git a/examples/advection/conf/adv_plain.yaml b/examples/advection/conf/adv_plain.yaml deleted file mode 100644 index 50e2dc64a6..0000000000 --- a/examples/advection/conf/adv_plain.yaml +++ /dev/null @@ -1,96 +0,0 @@ -defaults: - - ppsci_default - - TRAIN: train_default - - TRAIN/ema: ema_default - - TRAIN/swa: swa_default - - EVAL: eval_default - - INFER: infer_default - - _self_ - -hydra: - run: - # dynamic output directory according to running time and override name - dir: 
outputs_advection_plain/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} - job: - name: ${mode} # name of logfile - chdir: false # keep current working direcotry unchaned - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - INFER.pretrained_model_path - - mode - - output_dir - - log_freq - callbacks: - init_callback: - _target_: ppsci.utils.callbacks.InitCallback - sweep: - # output directory for multirun - dir: ${hydra.run.dir} - subdir: ./ - -# general settings -mode: train # running mode: train/eval -seed: 42 -output_dir: ${hydra:run.dir} -log_freq: 100 - -DATA_PATH: ./dataset/allen_cahn.mat - -# model settings -MODEL: - input_keys: [t, x] - output_keys: [u] - num_layers: 4 - hidden_size: 256 - activation: tanh - periods: - t: [2*np.pi, False] - -# training settings -TRAIN: - epochs: 200 - iters_per_epoch: 1000 - save_freq: 10 - eval_during_train: true - eval_freq: 1 - lr_scheduler: - epochs: ${TRAIN.epochs} - iters_per_epoch: ${TRAIN.iters_per_epoch} - learning_rate: 1.0e-3 - gamma: 0.9 - decay_steps: 2000 - by_epoch: false - batch_size: 4096 - pretrained_model_path: null - checkpoint_path: null - ema: - decay: 0.9 - avg_freq: 1 - -# evaluation settings -EVAL: - pretrained_model_path: null - eval_with_no_grad: true - batch_size: 4096 - -# inference settings -INFER: - pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/AllenCahn/allen_cahn_plain_pretrained.pdparams - export_path: ./inference/allen_cahn - pdmodel_path: ${INFER.export_path}.pdmodel - pdpiparams_path: ${INFER.export_path}.pdiparams - onnx_path: ${INFER.export_path}.onnx - device: gpu - engine: native - precision: fp32 - ir_optim: true - min_subgraph_size: 5 - gpu_mem: 2000 - gpu_id: 0 - max_batch_size: 1024 - num_cpu_threads: 10 - batch_size: 1024 diff --git a/examples/aneurysm/aneurysm_flow.py b/examples/aneurysm/aneurysm_flow.py index e42d0b17e6..a2089b2b22 100644 --- a/examples/aneurysm/aneurysm_flow.py +++ b/examples/aneurysm/aneurysm_flow.py @@ -197,6 +197,7 @@ def output_transform_p(self, in_, out): model, constraint, optimizer=optimizer, + iters_per_epoch=int(x.shape[0] / cfg.TRAIN.batch_size), equation=equation, cfg=cfg, ) diff --git a/examples/convection_diffusion/conf/5_best.yaml b/examples/convection_diffusion/conf/5_best.yaml deleted file mode 100644 index 43f33bb9ba..0000000000 --- a/examples/convection_diffusion/conf/5_best.yaml +++ /dev/null @@ -1,90 +0,0 @@ -hydra: - run: - # dynamic output directory according to running time and override name - dir: outputs_case5/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} - job: - name: ${mode} # name of logfile - chdir: false # keep current working direcotry unchaned - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq - sweep: - # output directory for multirun - dir: ${hydra.run.dir} - subdir: ./ - -log_grad_norm: false -log_loss: false - -mode: train -seed: 42 -output_dir: ${hydra:run.dir} -log_freq: 20 -LAMBDA: 0.0625 -EPS: 10.0 -DA: 10.0 -K0: 1.0 -PE: 30.0 -l: 2 -L: 10 -H: 1.0 -T: 5 -DT: 0.1 -NY: 25 - -MT: 2 -FDM_C_PATH: ./datasets/case5_fdm_reaction_txyc.csv -FDM_B_PATH: ./datasets/case5_fdm_reaction_txby.csv -MODEL: - model_c: - input_keys: - - t - - x - - 'y' - output_keys: - - c10 - num_layers: 4 - hidden_size: 64 - output_dim: 10 - activation: tanh - model_b: - input_keys: - - t - - x - - 'y' - 
output_keys: - - b - num_layers: 1 - hidden_size: 16 - output_dim: 1 - activation: tanh -TRAIN: - epochs: 3000 - l_bfgs_epochs: 150 - iters_per_epoch: 1 - eval_during_train: true - eval_freq: 500 - lr_scheduler: - epochs: ${TRAIN.epochs} - iters_per_epoch: ${TRAIN.iters_per_epoch} - learning_rate: 0.001 - gamma: 0.1 - by_epoch: true - weight: - IC: 100 - BC: 2000 - EQ: 30 - AD: 0.03 - pretrained_model_path: null -EVAL: - pretrained_model_path: null - eval_with_no_grad: true - batch_size: - metric_c: 8192 - metric_b: 8192 diff --git a/examples/convection_diffusion/conf/6.yaml b/examples/convection_diffusion/conf/6.yaml deleted file mode 100644 index 171deaee6e..0000000000 --- a/examples/convection_diffusion/conf/6.yaml +++ /dev/null @@ -1,89 +0,0 @@ -hydra: - run: - # dynamic output directory according to running time and override name - dir: outputs_case6/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} - job: - name: ${mode} # name of logfile - chdir: false # keep current working direcotry unchaned - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq - sweep: - # output directory for multirun - dir: ${hydra.run.dir} - subdir: ./ - -log_grad_norm: false -log_loss: false - -mode: train -seed: 2023 -output_dir: ${hydra:run.dir} -log_freq: 20 -LAMBDA: 0.0625 -EPS: 0.1 -DA: 0.1 -K0: 1.0 -PE: 30.0 -l: 2 -L: 10 -H: 1.0 -T: 250 -DT: 5 -NY: 25 -FDM_C_PATH: ./datasets/case6_fdm_reaction_txyc.csv -FDM_B_PATH: ./datasets/case6_fdm_reaction_txby.csv -MODEL: - model_c: - input_keys: - - t - - x - - 'y' - output_keys: - - c10 - num_layers: 4 - hidden_size: 64 - output_dim: 10 - activation: tanh - model_b: - input_keys: - - t - - x - - 'y' - output_keys: - - b - num_layers: 1 - hidden_size: 16 - output_dim: 1 - activation: tanh -TRAIN: - RUN_LBFGS: false - epochs: 3000 - l_bfgs_epochs: 150 - iters_per_epoch: 1 - eval_during_train: true - eval_freq: 500 - lr_scheduler: - epochs: ${TRAIN.epochs} - iters_per_epoch: ${TRAIN.iters_per_epoch} - learning_rate: 0.001 - gamma: 0.1 - by_epoch: true - weight: - IC: 1.0e2 # 关键参数 - BC: 1.0e2 # 关键参数 - EQ: 1.0e3 # 关键参数 - AD: 1.0e3 # 关键参数 - pretrained_model_path: null -EVAL: - pretrained_model_path: null - eval_with_no_grad: true - batch_size: - metric_c: 8192 - metric_b: 8192 diff --git a/examples/cylinder/2d_unsteady/cylinder2d_unsteady_Re100.py b/examples/cylinder/2d_unsteady/cylinder2d_unsteady_Re100.py index 8e46442fe5..f37a91dc0e 100644 --- a/examples/cylinder/2d_unsteady/cylinder2d_unsteady_Re100.py +++ b/examples/cylinder/2d_unsteady/cylinder2d_unsteady_Re100.py @@ -199,17 +199,11 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - cfg.output_dir, - optimizer, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, + optimizer=optimizer, equation=equation, validator=validator, visualizer=visualizer, - checkpoint_path=cfg.TRAIN.checkpoint_path, + cfg=cfg, ) # train model solver.train() @@ -283,10 +277,9 @@ def evaluate(cfg: DictConfig): # initialize solver solver = ppsci.solver.Solver( model, - output_dir=cfg.output_dir, validator=validator, visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, + cfg=cfg, ) # evaluate solver.eval() diff --git a/examples/fourcastnet/train_finetune.py b/examples/fourcastnet/train_finetune.py index 70b1779ab9..aaa08bf44e 100644 --- 
a/examples/fourcastnet/train_finetune.py +++ b/examples/fourcastnet/train_finetune.py @@ -163,6 +163,7 @@ def train(cfg: DictConfig): model, constraint, optimizer=optimizer, + iters_per_epoch=ITERS_PER_EPOCH, validator=validator, cfg=cfg, ) diff --git a/examples/fourcastnet/train_precip.py b/examples/fourcastnet/train_precip.py index 88f41646f6..893c996b47 100644 --- a/examples/fourcastnet/train_precip.py +++ b/examples/fourcastnet/train_precip.py @@ -159,6 +159,7 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, + iters_per_epoch=ITERS_PER_EPOCH, optimizer=optimizer, validator=validator, cfg=cfg, diff --git a/examples/fourcastnet/train_pretrain.py b/examples/fourcastnet/train_pretrain.py index ef0c9e3b57..791c44039b 100644 --- a/examples/fourcastnet/train_pretrain.py +++ b/examples/fourcastnet/train_pretrain.py @@ -152,6 +152,7 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, + iters_per_epoch=ITERS_PER_EPOCH, optimizer=optimizer, validator=validator, cfg=cfg, diff --git a/examples/fpde/conf/fractional_poisson_2d.yaml b/examples/fpde/conf/fractional_poisson_2d.yaml new file mode 100644 index 0000000000..9a85387ba5 --- /dev/null +++ b/examples/fpde/conf/fractional_poisson_2d.yaml @@ -0,0 +1,69 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_fractional_poisson_2d/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 20 + +ALPHA: 1.8 +NPOINT_INTERIOR: 100 +NPOINT_BC: 1 +NPOINT_EVAL: 1000 + +# model settings +MODEL: + input_keys: ["x", "y"] + output_keys: ["u"] + num_layers: 4 + hidden_size: 20 + activation: "tanh" + +# training settings +TRAIN: + epochs: 20000 + iters_per_epoch: 1 + save_freq: 20 + eval_during_train: true + eval_freq: 1000 + learning_rate: 0.001 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + batch_size: + sup_validator: 128 diff --git a/examples/pipe/poiseuille_flow.py b/examples/pipe/poiseuille_flow.py index 257211ec16..dfc2965ce7 100644 --- a/examples/pipe/poiseuille_flow.py +++ b/examples/pipe/poiseuille_flow.py @@ -140,10 +140,13 @@ def output_trans_p(input, out): solver = ppsci.solver.Solver( model, constraint, + cfg.output_dir, + optimizer, + epochs=cfg.TRAIN.epochs, iters_per_epoch=ITERS_PER_EPOCH, - optimizer=optimizer, + eval_during_train=cfg.TRAIN.eval_during_train, + save_freq=cfg.TRAIN.save_freq, equation=equation, - cfg=cfg, ) solver.train() From 26b0aa0e92451a56a72520f80d8bc5fd9d18ef4b Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Thu, 9 May 2024 12:04:04 +0800 Subject: [PATCH 08/20] restore some config --- examples/NLS-MB/NLS-MB_optical_rogue_wave.py | 1 - examples/NLS-MB/NLS-MB_optical_soliton.py | 1 -
examples/aneurysm/aneurysm_flow.py | 9 +++++++-- examples/fourcastnet/train_finetune.py | 9 +++++++-- examples/fourcastnet/train_precip.py | 11 ++++++++--- examples/fourcastnet/train_pretrain.py | 12 +++++++++--- 6 files changed, 31 insertions(+), 12 deletions(-) diff --git a/examples/NLS-MB/NLS-MB_optical_rogue_wave.py b/examples/NLS-MB/NLS-MB_optical_rogue_wave.py index 4f208da5bf..785332c7fb 100644 --- a/examples/NLS-MB/NLS-MB_optical_rogue_wave.py +++ b/examples/NLS-MB/NLS-MB_optical_rogue_wave.py @@ -261,7 +261,6 @@ def train(cfg: DictConfig): eval_freq=cfg.TRAIN.lbfgs.eval_freq, equation=equation, validator=validator, - cfg=cfg, ) # train model solver.train() diff --git a/examples/NLS-MB/NLS-MB_optical_soliton.py b/examples/NLS-MB/NLS-MB_optical_soliton.py index 820e0452ed..b32a786c35 100644 --- a/examples/NLS-MB/NLS-MB_optical_soliton.py +++ b/examples/NLS-MB/NLS-MB_optical_soliton.py @@ -238,7 +238,6 @@ def train(cfg: DictConfig): eval_freq=cfg.TRAIN.lbfgs.eval_freq, equation=equation, validator=validator, - cfg=cfg, ) # train model solver.train() diff --git a/examples/aneurysm/aneurysm_flow.py b/examples/aneurysm/aneurysm_flow.py index a2089b2b22..d69fa38aba 100644 --- a/examples/aneurysm/aneurysm_flow.py +++ b/examples/aneurysm/aneurysm_flow.py @@ -196,10 +196,15 @@ def output_transform_p(self, in_, out): solver = ppsci.solver.Solver( model, constraint, - optimizer=optimizer, + cfg.output_dir, + optimizer, + log_freq=cfg.log_freq, + epochs=cfg.TRAIN.epochs, iters_per_epoch=int(x.shape[0] / cfg.TRAIN.batch_size), + save_freq=cfg.save_freq, equation=equation, - cfg=cfg, + pretrained_model_path=cfg.TRAIN.pretrained_model_path, + checkpoint_path=cfg.TRAIN.checkpoint_path, ) solver.train() diff --git a/examples/fourcastnet/train_finetune.py b/examples/fourcastnet/train_finetune.py index aaa08bf44e..9ea102e457 100644 --- a/examples/fourcastnet/train_finetune.py +++ b/examples/fourcastnet/train_finetune.py @@ -162,10 +162,15 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - optimizer=optimizer, + cfg.output_dir, + optimizer, + epochs=cfg.TRAIN.epochs, iters_per_epoch=ITERS_PER_EPOCH, + eval_during_train=True, validator=validator, - cfg=cfg, + pretrained_model_path=cfg.TRAIN.pretrained_model_path, + compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, ) # train model solver.train() diff --git a/examples/fourcastnet/train_precip.py b/examples/fourcastnet/train_precip.py index 893c996b47..7ca544becc 100644 --- a/examples/fourcastnet/train_precip.py +++ b/examples/fourcastnet/train_precip.py @@ -159,10 +159,15 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - iters_per_epoch=ITERS_PER_EPOCH, - optimizer=optimizer, + cfg.output_dir, + optimizer, + lr_scheduler, + cfg.TRAIN.epochs, + ITERS_PER_EPOCH, + eval_during_train=True, validator=validator, - cfg=cfg, + compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, ) # train model solver.train() diff --git a/examples/fourcastnet/train_pretrain.py b/examples/fourcastnet/train_pretrain.py index 791c44039b..c159d93308 100644 --- a/examples/fourcastnet/train_pretrain.py +++ b/examples/fourcastnet/train_pretrain.py @@ -152,10 +152,16 @@ def train(cfg: DictConfig): solver = ppsci.solver.Solver( model, constraint, - iters_per_epoch=ITERS_PER_EPOCH, - optimizer=optimizer, + cfg.output_dir, + optimizer, + lr_scheduler, + cfg.TRAIN.epochs, + ITERS_PER_EPOCH, + eval_during_train=True, + 
seed=cfg.seed,
         validator=validator,
-        cfg=cfg,
+        compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch,
+        eval_with_no_grad=cfg.EVAL.eval_with_no_grad,
     )
     # train model
     solver.train()

From ce524c5f33dbdb0a8dc2261c2f1cc79c7ec04e8d Mon Sep 17 00:00:00 2001
From: HydrogenSulfate <490868991@qq.com>
Date: Sun, 12 May 2024 03:23:49 +0000
Subject: [PATCH 09/20] fix config unitest

---
 ppsci/utils/__init__.py | 1 +
 ppsci/utils/config.py | 4 +-
 test/utils/test_config.py | 87 ++++++++++++++++-----------------------
 3 files changed, 38 insertions(+), 54 deletions(-)

diff --git a/ppsci/utils/__init__.py b/ppsci/utils/__init__.py
index 0c30d36d95..8cb1a16afe 100644
--- a/ppsci/utils/__init__.py
+++ b/ppsci/utils/__init__.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from ppsci.utils import config # isort:skip # noqa: F401
 from ppsci.utils import ema
 from ppsci.utils import initializer
 from ppsci.utils import logger
diff --git a/ppsci/utils/config.py b/ppsci/utils/config.py
index 16e839f3f6..131231b9d6 100644
--- a/ppsci/utils/config.py
+++ b/ppsci/utils/config.py
@@ -21,8 +21,6 @@
 from typing_extensions import Literal

-from ppsci.utils import misc
-
 __all__ = []

 if importlib.util.find_spec("pydantic") is not None:
@@ -314,7 +312,7 @@ def use_wandb_check(cls, v, info: ValidationInfo):
         if v and not isinstance(info.data["wandb_config"], dict):
             raise ValueError(
                 "'wandb_config' should be a dict when 'use_wandb' is True, "
-                f"but got {misc.typename(info.data['wandb_config'])}"
+                f"but got {info.data['wandb_config'].__class__.__name__}"
             )
         return v

diff --git a/test/utils/test_config.py b/test/utils/test_config.py
index 5f650685c8..93b135d944 100644
--- a/test/utils/test_config.py
+++ b/test/utils/test_config.py
@@ -1,25 +1,17 @@
-# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+import os

 import hydra
 import paddle
 import pytest
-from omegaconf import DictConfig
+import yaml

-paddle.seed(1024)
+# Assuming your callback class lives under this path
+from ppsci.utils.callbacks import InitCallback

+# Set the seed for Paddle
+paddle.seed(1024)

+# The test function does not need a decorator
 @pytest.mark.parametrize(
     "epochs,mode,seed",
     [
         (10, "train", 1024),
         (-1, "train", 1024),
         (10, "eval", -1),
     ],
 )
-def test_invalid_epochs(
-    epochs,
-    mode,
-    seed,
-):
-    @hydra.main(version_base=None, config_path="./", config_name="test_config.yaml")
-    def main(cfg: DictConfig):
-        pass
-
-    # sys.exit will be called when validation error in pydantic, so there we use
-    # SystemExit instead of other type of errors.
-    with pytest.raises(SystemExit):
-        cfg_dict = dict(
-            {
-                "TRAIN": {
-                    "epochs": epochs,
-                },
-                "mode": mode,
-                "seed": seed,
-                "hydra": {
-                    "callbacks": {
-                        "init_callback": {
-                            "_target_": "ppsci.utils.callbacks.InitCallback"
-                        }
-                    }
-                },
+def test_invalid_epochs(tmpdir, epochs, mode, seed):
+    cfg_dict = {
+        "hydra": {
+            "callbacks": {
+                "init_callback": {"_target_": "ppsci.utils.callbacks.InitCallback"}
             }
-        )
-        # print(cfg_dict)
-        import yaml
-
-        with open("test_config.yaml", "w") as f:
-            yaml.dump(dict(cfg_dict), f)
-
-        main()
-
-
+        },
+        "mode": mode,
+        "seed": seed,
+        "TRAIN": {
+            "epochs": epochs,
+        },
+    }
+    # Create a temporary config file
+    dir_ = os.path.dirname(__file__)
+    config_abs_path = os.path.join(dir_, "test_config.yaml")
+    with open(config_abs_path, "w") as f:
+        f.write(yaml.dump(cfg_dict))
+
+    # Use hydra's compose API to build the config instead of using main
+    with hydra.initialize(config_path="./", version_base=None):
+        cfg = hydra.compose(config_name="test_config.yaml")
+        # Trigger the callback manually
+        with pytest.raises(SystemExit) as exec_info:
+            InitCallback().on_job_start(config=cfg)
+        assert exec_info.value.code == 2
+        # You can now assert on cfg or process it further as needed
+
+
+# This part is usually unnecessary, unless you want to run the tests directly from the script
 if __name__ == "__main__":
     pytest.main()

From d5fb64a20f27338ba8af9639ab9f9dbb70a18165 Mon Sep 17 00:00:00 2001
From: HydrogenSulfate <490868991@qq.com>
Date: Sun, 12 May 2024 03:23:49 +0000
Subject: [PATCH 10/20] fix config unitest

---
 ppsci/utils/__init__.py | 3 ++
 ppsci/utils/config.py | 4 +-
 test/utils/test_config.py | 87 ++++++++++++++++-----------------------
 3 files changed, 40 insertions(+), 54 deletions(-)

diff --git a/ppsci/utils/__init__.py b/ppsci/utils/__init__.py
index 0c30d36d95..f61b0a6cd9 100644
--- a/ppsci/utils/__init__.py
+++ b/ppsci/utils/__init__.py
@@ -12,6 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+# Put config module import at the top level to register default config(s) in
+# ConfigStore at the beginning of ppsci
+from ppsci.utils import config # isort:skip # noqa: F401
 from ppsci.utils import ema
 from ppsci.utils import initializer
 from ppsci.utils import logger
diff --git a/ppsci/utils/config.py b/ppsci/utils/config.py
index 16e839f3f6..131231b9d6 100644
--- a/ppsci/utils/config.py
+++ b/ppsci/utils/config.py
@@ -21,8 +21,6 @@
 from typing_extensions import Literal

-from ppsci.utils import misc
-
 __all__ = []

 if importlib.util.find_spec("pydantic") is not None:
@@ -314,7 +312,7 @@ def use_wandb_check(cls, v, info: ValidationInfo):
         if v and not isinstance(info.data["wandb_config"], dict):
             raise ValueError(
                 "'wandb_config' should be a dict when 'use_wandb' is True, "
-                f"but got {misc.typename(info.data['wandb_config'])}"
+                f"but got {info.data['wandb_config'].__class__.__name__}"
             )
         return v

diff --git a/test/utils/test_config.py b/test/utils/test_config.py
index 5f650685c8..93b135d944 100644
--- a/test/utils/test_config.py
+++ b/test/utils/test_config.py
@@ -1,25 +1,17 @@
-# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+import os

 import hydra
 import paddle
 import pytest
-from omegaconf import DictConfig
+import yaml

-paddle.seed(1024)
+# Assuming your callback class lives under this path
+from ppsci.utils.callbacks import InitCallback

+# Set the seed for Paddle
+paddle.seed(1024)

+# The test function does not need a decorator
 @pytest.mark.parametrize(
     "epochs,mode,seed",
     [
         (10, "train", 1024),
         (-1, "train", 1024),
         (10, "eval", -1),
     ],
 )
-def test_invalid_epochs(
-    epochs,
-    mode,
-    seed,
-):
-    @hydra.main(version_base=None, config_path="./", config_name="test_config.yaml")
-    def main(cfg: DictConfig):
-        pass
-
-    # sys.exit will be called when validation error in pydantic, so there we use
-    # SystemExit instead of other type of errors.
-    with pytest.raises(SystemExit):
-        cfg_dict = dict(
-            {
-                "TRAIN": {
-                    "epochs": epochs,
-                },
-                "mode": mode,
-                "seed": seed,
-                "hydra": {
-                    "callbacks": {
-                        "init_callback": {
-                            "_target_": "ppsci.utils.callbacks.InitCallback"
-                        }
-                    }
-                },
+def test_invalid_epochs(tmpdir, epochs, mode, seed):
+    cfg_dict = {
+        "hydra": {
+            "callbacks": {
+                "init_callback": {"_target_": "ppsci.utils.callbacks.InitCallback"}
             }
-        )
-        # print(cfg_dict)
-        import yaml
-
-        with open("test_config.yaml", "w") as f:
-            yaml.dump(dict(cfg_dict), f)
-
-        main()
-
-
+        },
+        "mode": mode,
+        "seed": seed,
+        "TRAIN": {
+            "epochs": epochs,
+        },
+    }
+    # Create a temporary config file
+    dir_ = os.path.dirname(__file__)
+    config_abs_path = os.path.join(dir_, "test_config.yaml")
+    with open(config_abs_path, "w") as f:
+        f.write(yaml.dump(cfg_dict))
+
+    # Use hydra's compose API to build the config instead of using main
+    with hydra.initialize(config_path="./", version_base=None):
+        cfg = hydra.compose(config_name="test_config.yaml")
+        # Trigger the callback manually
+        with pytest.raises(SystemExit) as exec_info:
+            InitCallback().on_job_start(config=cfg)
+        assert exec_info.value.code == 2
+        # You can now assert on cfg or process it further as needed
+
+
+# This part is usually unnecessary, unless you want to run the tests directly from the script
 if __name__ == "__main__":
     pytest.main()

From f114b22e3cff0135322be4f32a4b58310a53f367 Mon Sep 17 00:00:00 2001
From: HydrogenSulfate <490868991@qq.com>
Date: Sun, 12 May 2024 03:37:28 +0000
Subject: [PATCH 11/20] Fix unitest test_writer

---
 test/utils/test_writer.py | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/test/utils/test_writer.py b/test/utils/test_writer.py
index 6e960bee28..cce3f69ab8 100644
--- a/test/utils/test_writer.py
+++ b/test/utils/test_writer.py
@@ -21,13 +21,11 @@ def test_save_csv_file():
     keys = ["x1", "y1", "z1"]
-    alias_dict = (
-        {
-            "x": "x1",
-            "y": "y1",
-            "z": "z1",
-        },
-    )
+    alias_dict = {
+        "x": "x1",
+        "y": "y1",
+        "z": "z1",
+    }
     data_dict = {
         keys[0]: np.random.randint(0, 255, (10, 1)),
         keys[1]: np.random.rand(10, 1),

From 3ad386f63ff990914f2c5ecfb92bb82ef98f8894 Mon Sep 17 00:00:00 2001
From: HydrogenSulfate <490868991@qq.com>
Date: Sun, 12 May 2024 04:34:30 +0000
Subject: [PATCH 12/20] fix

---
 ppsci/solver/solver.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ppsci/solver/solver.py b/ppsci/solver/solver.py
index 08b94cab04..cde418ea57 100644
--- a/ppsci/solver/solver.py
+++ b/ppsci/solver/solver.py
@@ -1037,5 +1037,5 @@ def _parse_params_from_cfg(self, cfg: DictConfig):
             self.pretrained_model_path = cfg.TRAIN.pretrained_model_path
         elif cfg.mode == "eval":
             self.pretrained_model_path = cfg.EVAL.pretrained_model_path
-        elif cfg.mode == "infer":
+        elif cfg.mode in ["export", "infer"]:
             self.pretrained_model_path = cfg.INFER.pretrained_model_path

From 25a875fada69eeaa3fa828cd4b4b4e3a7200ae91 Mon Sep 17 00:00:00 2001
From: HydrogenSulfate
<490868991@qq.com> Date: Sun, 12 May 2024 04:36:30 +0000 Subject: [PATCH 13/20] fix --- .../2d_unsteady/conf/cylinder2d_unsteady_Re100.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/examples/cylinder/2d_unsteady/conf/cylinder2d_unsteady_Re100.yaml b/examples/cylinder/2d_unsteady/conf/cylinder2d_unsteady_Re100.yaml index b7f6f847b3..89530247b6 100644 --- a/examples/cylinder/2d_unsteady/conf/cylinder2d_unsteady_Re100.yaml +++ b/examples/cylinder/2d_unsteady/conf/cylinder2d_unsteady_Re100.yaml @@ -1,3 +1,12 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name From ac093aeb9c3876b3560357f050da5ba3d936a81b Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Sun, 12 May 2024 06:43:51 +0000 Subject: [PATCH 14/20] add default exclude_keys --- ppsci/utils/config.py | 54 ++++++++++++++++++++++++++++++++++++++----- 1 file changed, 48 insertions(+), 6 deletions(-) diff --git a/ppsci/utils/config.py b/ppsci/utils/config.py index ffbe76dad2..0352d2f7ce 100644 --- a/ppsci/utils/config.py +++ b/ppsci/utils/config.py @@ -334,32 +334,74 @@ def use_wandb_check(cls, v, info: ValidationInfo): ################## """ + cs = ConfigStore.instance() + global_default_cfg = SolverConfig().model_dump() omegaconf_dict_config = OmegaConf.create(global_default_cfg) - cs = ConfigStore.instance() cs.store(name="ppsci_default", node=omegaconf_dict_config) train_default_cfg = TrainConfig().model_dump() train_omegaconf_dict_config = OmegaConf.create(train_default_cfg) - cs = ConfigStore.instance() cs.store(group="TRAIN", name="train_default", node=train_omegaconf_dict_config) ema_default_cfg = EMAConfig().model_dump() ema_omegaconf_dict_config = OmegaConf.create(ema_default_cfg) - cs = ConfigStore.instance() cs.store(group="TRAIN/ema", name="ema_default", node=ema_omegaconf_dict_config) swa_default_cfg = SWAConfig().model_dump() swa_omegaconf_dict_config = OmegaConf.create(swa_default_cfg) - cs = ConfigStore.instance() cs.store(group="TRAIN/swa", name="swa_default", node=swa_omegaconf_dict_config) eval_default_cfg = EvalConfig().model_dump() eval_omegaconf_dict_config = OmegaConf.create(eval_default_cfg) - cs = ConfigStore.instance() cs.store(group="EVAL", name="eval_default", node=eval_omegaconf_dict_config) infer_default_cfg = InferConfig().model_dump() infer_omegaconf_dict_config = OmegaConf.create(infer_default_cfg) - cs = ConfigStore.instance() cs.store(group="INFER", name="infer_default", node=infer_omegaconf_dict_config) + + exclude_keys_default = [ + "mode", + "output_dir", + "log_freq", + "seed", + "use_vdl", + "use_tbd", + "wandb_config", + "use_wandb", + "device", + "use_amp", + "amp_level", + "to_static", + "prim", + "log_level", + "TRAIN.save_freq", + "TRAIN.eval_during_train", + "TRAIN.start_eval_epoch", + "TRAIN.eval_freq", + "TRAIN.checkpoint_path", + "TRAIN.pretrained_model_path", + "EVAL.pretrained_model_path", + "EVAL.eval_with_no_grad", + "EVAL.compute_metric_by_batch", + "INFER.pretrained_model_path", + "INFER.export_path", + "INFER.pdmodel_path", + "INFER.pdiparams_path", + "INFER.onnx_path", + "INFER.device", + "INFER.engine", + "INFER.precision", + "INFER.ir_optim", + "INFER.min_subgraph_size", + "INFER.gpu_mem", + "INFER.gpu_id", + "INFER.max_batch_size", + "INFER.num_cpu_threads", + "INFER.batch_size", + ] + cs.store( + 
group="hydra/job/config/override_dirname/exclude_keys", + name="exclude_keys_default", + node=exclude_keys_default, + ) From 3b091b930e44424c4a456315bc2d4d5e966ae83b Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Mon, 13 May 2024 03:21:13 +0000 Subject: [PATCH 15/20] remove unnecessary files --- .../allen_cahn/conf/allen_cahn_causal.yaml | 96 ------------------ .../conf/allen_cahn_causal_fourier.yaml | 99 ------------------- .../allen_cahn/conf/allen_cahn_fourier.yaml | 96 ------------------ 3 files changed, 291 deletions(-) delete mode 100644 examples/allen_cahn/conf/allen_cahn_causal.yaml delete mode 100644 examples/allen_cahn/conf/allen_cahn_causal_fourier.yaml delete mode 100644 examples/allen_cahn/conf/allen_cahn_fourier.yaml diff --git a/examples/allen_cahn/conf/allen_cahn_causal.yaml b/examples/allen_cahn/conf/allen_cahn_causal.yaml deleted file mode 100644 index 41d070d403..0000000000 --- a/examples/allen_cahn/conf/allen_cahn_causal.yaml +++ /dev/null @@ -1,96 +0,0 @@ -defaults: - - ppsci_default - - TRAIN: train_default - - TRAIN/ema: ema_default - - TRAIN/swa: swa_default - - EVAL: eval_default - - INFER: infer_default - - _self_ - -hydra: - run: - # dynamic output directory according to running time and override name - dir: outputs_allen_cahn_causal/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} - job: - name: ${mode} # name of logfile - chdir: false # keep current working direcotry unchaned - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - INFER.pretrained_model_path - - mode - - output_dir - - log_freq - callbacks: - init_callback: - _target_: ppsci.utils.callbacks.InitCallback - sweep: - # output directory for multirun - dir: ${hydra.run.dir} - subdir: ./ - -# general settings -mode: train # running mode: train/eval -seed: 42 -output_dir: ${hydra:run.dir} -log_freq: 100 - -DATA_PATH: ./dataset/allen_cahn.mat - -# model settings -MODEL: - input_keys: [t, x] - output_keys: [u] - num_layers: 4 - hidden_size: 256 - activation: tanh - periods: - x: [2.0, False] - -# training settings -TRAIN: - epochs: 200 - iters_per_epoch: 1000 - save_freq: 10 - eval_during_train: true - eval_freq: 1 - lr_scheduler: - epochs: ${TRAIN.epochs} - iters_per_epoch: ${TRAIN.iters_per_epoch} - learning_rate: 1.0e-3 - gamma: 0.9 - decay_steps: 2000 - by_epoch: false - batch_size: 4096 - pretrained_model_path: null - checkpoint_path: null - causal: - n_chunks: 32 - tol: 1.0 - -# evaluation settings -EVAL: - pretrained_model_path: null - eval_with_no_grad: true - batch_size: 4096 - -# inference settings -INFER: - pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/AllenCahn/allen_cahn_plain_pretrained.pdparams - export_path: ./inference/allen_cahn - pdmodel_path: ${INFER.export_path}.pdmodel - pdpiparams_path: ${INFER.export_path}.pdiparams - onnx_path: ${INFER.export_path}.onnx - device: gpu - engine: native - precision: fp32 - ir_optim: true - min_subgraph_size: 5 - gpu_mem: 2000 - gpu_id: 0 - max_batch_size: 1024 - num_cpu_threads: 10 - batch_size: 1024 diff --git a/examples/allen_cahn/conf/allen_cahn_causal_fourier.yaml b/examples/allen_cahn/conf/allen_cahn_causal_fourier.yaml deleted file mode 100644 index 6a3ad83ded..0000000000 --- a/examples/allen_cahn/conf/allen_cahn_causal_fourier.yaml +++ /dev/null @@ -1,99 +0,0 @@ -defaults: - - ppsci_default - - TRAIN: train_default - - TRAIN/ema: ema_default - - TRAIN/swa: swa_default - - EVAL: eval_default - 
- INFER: infer_default - - _self_ - -hydra: - run: - # dynamic output directory according to running time and override name - dir: outputs_allen_cahn_causal_fourier/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} - job: - name: ${mode} # name of logfile - chdir: false # keep current working direcotry unchaned - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - INFER.pretrained_model_path - - mode - - output_dir - - log_freq - callbacks: - init_callback: - _target_: ppsci.utils.callbacks.InitCallback - sweep: - # output directory for multirun - dir: ${hydra.run.dir} - subdir: ./ - -# general settings -mode: train # running mode: train/eval -seed: 42 -output_dir: ${hydra:run.dir} -log_freq: 100 - -DATA_PATH: ./dataset/allen_cahn.mat - -# model settings -MODEL: - input_keys: [t, x] - output_keys: [u] - num_layers: 4 - hidden_size: 256 - activation: tanh - periods: - x: [2.0, False] - fourier: - dim: 256 - scale: 1.0 - -# training settings -TRAIN: - epochs: 200 - iters_per_epoch: 1000 - save_freq: 10 - eval_during_train: true - eval_freq: 1 - lr_scheduler: - epochs: ${TRAIN.epochs} - iters_per_epoch: ${TRAIN.iters_per_epoch} - learning_rate: 1.0e-3 - gamma: 0.9 - decay_steps: 2000 - by_epoch: false - batch_size: 4096 - pretrained_model_path: null - checkpoint_path: null - # causal: - # n_chunks: 16 - # tol: 1.0 - -# evaluation settings -EVAL: - pretrained_model_path: null - eval_with_no_grad: true - batch_size: 4096 - -# inference settings -INFER: - pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/AllenCahn/allen_cahn_plain_pretrained.pdparams - export_path: ./inference/allen_cahn - pdmodel_path: ${INFER.export_path}.pdmodel - pdpiparams_path: ${INFER.export_path}.pdiparams - onnx_path: ${INFER.export_path}.onnx - device: gpu - engine: native - precision: fp32 - ir_optim: true - min_subgraph_size: 5 - gpu_mem: 2000 - gpu_id: 0 - max_batch_size: 1024 - num_cpu_threads: 10 - batch_size: 1024 diff --git a/examples/allen_cahn/conf/allen_cahn_fourier.yaml b/examples/allen_cahn/conf/allen_cahn_fourier.yaml deleted file mode 100644 index f2214131d1..0000000000 --- a/examples/allen_cahn/conf/allen_cahn_fourier.yaml +++ /dev/null @@ -1,96 +0,0 @@ -defaults: - - ppsci_default - - TRAIN: train_default - - TRAIN/ema: ema_default - - TRAIN/swa: swa_default - - EVAL: eval_default - - INFER: infer_default - - _self_ - -hydra: - run: - # dynamic output directory according to running time and override name - dir: outputs_allen_cahn_fourier/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} - job: - name: ${mode} # name of logfile - chdir: false # keep current working direcotry unchaned - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - INFER.pretrained_model_path - - mode - - output_dir - - log_freq - callbacks: - init_callback: - _target_: ppsci.utils.callbacks.InitCallback - sweep: - # output directory for multirun - dir: ${hydra.run.dir} - subdir: ./ - -# general settings -mode: train # running mode: train/eval -seed: 42 -output_dir: ${hydra:run.dir} -log_freq: 100 - -DATA_PATH: ./dataset/allen_cahn.mat - -# model settings -MODEL: - input_keys: [t, x] - output_keys: [u] - num_layers: 4 - hidden_size: 256 - activation: tanh - periods: - x: [2.0, False] - fourier: - dim: 256 - scale: 1.0 - -# training settings -TRAIN: - epochs: 200 - iters_per_epoch: 1000 - save_freq: 10 - 
eval_during_train: true
-  eval_freq: 1
-  lr_scheduler:
-    epochs: ${TRAIN.epochs}
-    iters_per_epoch: ${TRAIN.iters_per_epoch}
-    learning_rate: 1.0e-3
-    gamma: 0.9
-    decay_steps: 2000
-    by_epoch: false
-  batch_size: 4096
-  pretrained_model_path: null
-  checkpoint_path: null
-
-# evaluation settings
-EVAL:
-  pretrained_model_path: null
-  eval_with_no_grad: true
-  batch_size: 4096
-
-# inference settings
-INFER:
-  pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/AllenCahn/allen_cahn_plain_pretrained.pdparams
-  export_path: ./inference/allen_cahn
-  pdmodel_path: ${INFER.export_path}.pdmodel
-  pdpiparams_path: ${INFER.export_path}.pdiparams
-  onnx_path: ${INFER.export_path}.onnx
-  device: gpu
-  engine: native
-  precision: fp32
-  ir_optim: true
-  min_subgraph_size: 5
-  gpu_mem: 2000
-  gpu_id: 0
-  max_batch_size: 1024
-  num_cpu_threads: 10
-  batch_size: 1024

From 6e75c4e7cbf8028b41b6402dbf5f06c4b48c6829 Mon Sep 17 00:00:00 2001
From: HydrogenSulfate <490868991@qq.com>
Date: Fri, 7 Jun 2024 13:40:52 +0800
Subject: [PATCH 16/20] remove redundant annotations

---
 test/utils/test_config.py | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/test/utils/test_config.py b/test/utils/test_config.py
index 678d28e3c3..9e3af8f468 100644
--- a/test/utils/test_config.py
+++ b/test/utils/test_config.py
@@ -5,13 +5,11 @@
 import pytest
 import yaml

-# Assuming your callback class lives under this path
 from ppsci.utils.callbacks import InitCallback

-# Set the seed for Paddle
 paddle.seed(1024)

-# The test function does not need a decorator
+
 @pytest.mark.parametrize(
     "epochs,mode,seed",
     [
@@ -46,6 +44,5 @@ def test_invalid_epochs(tmpdir, epochs, mode, seed):
         assert exec_info.value.code == 2

-# This part is usually unnecessary, unless you want to run the tests directly from the script
 if __name__ == "__main__":
     pytest.main()

From 15311a3c267286dca842e09404e6916338ff95ad Mon Sep 17 00:00:00 2001
From: HydrogenSulfate <490868991@qq.com>
Date: Fri, 7 Jun 2024 13:51:56 +0800
Subject: [PATCH 17/20] remove more code

---
 examples/aneurysm/aneurysm.py | 2 --
 examples/bracket/bracket.py | 5 -----
 examples/gpinn/poisson_1d.py | 2 --
 examples/ldc/ldc2d_steady_Re10.py | 2 --
 examples/ldc/ldc2d_unsteady_Re10.py | 2 --
 examples/neuraloperator/train_sfno.py | 20 --------------------
 examples/neuraloperator/train_tfno.py | 20 --------------------
 examples/neuraloperator/train_uno.py | 20 --------------------
 8 files changed, 73 deletions(-)

diff --git a/examples/aneurysm/aneurysm.py b/examples/aneurysm/aneurysm.py
index 25cc5036c9..a84f89cd38 100644
--- a/examples/aneurysm/aneurysm.py
+++ b/examples/aneurysm/aneurysm.py
@@ -186,7 +186,6 @@ def inlet_w_ref_func(_in):
             "input": input_dict,
             "label": label_dict,
         },
-        "sampler": {"name": "BatchSampler"},
         "num_workers": 1,
     }
     sup_validator = ppsci.validate.SupervisedValidator(
@@ -272,7 +271,6 @@ def evaluate(cfg: DictConfig):
             "input": input_dict,
             "label": label_dict,
         },
-        "sampler": {"name": "BatchSampler"},
         "num_workers": 1,
     }
     sup_validator = ppsci.validate.SupervisedValidator(
diff --git a/examples/bracket/bracket.py b/examples/bracket/bracket.py
index e5f96aa5af..de98c581d9 100644
--- a/examples/bracket/bracket.py
+++ b/examples/bracket/bracket.py
@@ -437,11 +437,6 @@ def evaluate(cfg: DictConfig):
             "input": input_dict,
             "label": label_dict,
         },
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": False,
-            "shuffle": False,
-        },
     }
     sup_validator = ppsci.validate.SupervisedValidator(
         {**eval_dataloader_cfg, "batch_size": cfg.EVAL.batch_size.sup_validator},
diff --git a/examples/gpinn/poisson_1d.py b/examples/gpinn/poisson_1d.py
index ecaa116b3d..8e43baca4f 100644
---
a/examples/gpinn/poisson_1d.py +++ b/examples/gpinn/poisson_1d.py @@ -117,7 +117,6 @@ def u_solution(in_): "dataset": "NamedArrayDataset", "total_size": cfg.NPOINT_PDE_EVAL, "batch_size": cfg.EVAL.batch_size.l2rel_validator, - "sampler": {"name": "BatchSampler"}, }, ppsci.loss.MSELoss("mean"), evenly=True, @@ -234,7 +233,6 @@ def u_solution(in_): "dataset": "NamedArrayDataset", "total_size": cfg.NPOINT_PDE, "batch_size": cfg.EVAL.batch_size.l2rel_validator, - "sampler": {"name": "BatchSampler"}, }, ppsci.loss.MSELoss("mean"), evenly=True, diff --git a/examples/ldc/ldc2d_steady_Re10.py b/examples/ldc/ldc2d_steady_Re10.py index 3dd0c645d6..c36e8e4358 100644 --- a/examples/ldc/ldc2d_steady_Re10.py +++ b/examples/ldc/ldc2d_steady_Re10.py @@ -114,7 +114,6 @@ def train(cfg: DictConfig): "dataset": "NamedArrayDataset", "total_size": NPOINT_EVAL, "batch_size": cfg.EVAL.batch_size.residual_validator, - "sampler": {"name": "BatchSampler"}, }, ppsci.loss.MSELoss("sum"), evenly=True, @@ -179,7 +178,6 @@ def evaluate(cfg: DictConfig): "dataset": "NamedArrayDataset", "total_size": NPOINT_EVAL, "batch_size": cfg.EVAL.batch_size.residual_validator, - "sampler": {"name": "BatchSampler"}, }, ppsci.loss.MSELoss("sum"), evenly=True, diff --git a/examples/ldc/ldc2d_unsteady_Re10.py b/examples/ldc/ldc2d_unsteady_Re10.py index eb541365bd..27f83879bd 100644 --- a/examples/ldc/ldc2d_unsteady_Re10.py +++ b/examples/ldc/ldc2d_unsteady_Re10.py @@ -135,7 +135,6 @@ def train(cfg: DictConfig): "dataset": "NamedArrayDataset", "total_size": NPOINT_EVAL, "batch_size": cfg.EVAL.batch_size.residual_validator, - "sampler": {"name": "BatchSampler"}, }, ppsci.loss.MSELoss("sum"), evenly=True, @@ -232,7 +231,6 @@ def evaluate(cfg: DictConfig): "dataset": "NamedArrayDataset", "total_size": NPOINT_EVAL, "batch_size": cfg.EVAL.batch_size.residual_validator, - "sampler": {"name": "BatchSampler"}, }, ppsci.loss.MSELoss("sum"), evenly=True, diff --git a/examples/neuraloperator/train_sfno.py b/examples/neuraloperator/train_sfno.py index 50843c00e9..f066c56e04 100644 --- a/examples/neuraloperator/train_sfno.py +++ b/examples/neuraloperator/train_sfno.py @@ -53,11 +53,6 @@ def train(cfg: DictConfig): "test_resolutions": cfg.DATASET.test_resolutions, "data_split": "test_32x64", }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 0, } @@ -72,11 +67,6 @@ def train(cfg: DictConfig): "test_resolutions": cfg.DATASET.test_resolutions, "data_split": "test_64x128", }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 0, } @@ -164,11 +154,6 @@ def evaluate(cfg: DictConfig): "test_resolutions": cfg.DATASET.test_resolutions, "data_split": "test_32x64", }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 0, } @@ -183,11 +168,6 @@ def evaluate(cfg: DictConfig): "test_resolutions": cfg.DATASET.test_resolutions, "data_split": "test_64x128", }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 0, } diff --git a/examples/neuraloperator/train_tfno.py b/examples/neuraloperator/train_tfno.py index 1612ae5489..2101dbafff 100644 --- a/examples/neuraloperator/train_tfno.py +++ b/examples/neuraloperator/train_tfno.py @@ -68,11 +68,6 @@ def train(cfg: DictConfig): "channel_dim": cfg.DATASET.channel_dim, 
"data_split": "test_16x16", }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 0, } @@ -92,11 +87,6 @@ def train(cfg: DictConfig): "channel_dim": cfg.DATASET.channel_dim, "data_split": "test_32x32", }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 0, } @@ -195,11 +185,6 @@ def evaluate(cfg: DictConfig): "channel_dim": cfg.DATASET.channel_dim, "data_split": "test_16x16", }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 0, } @@ -219,11 +204,6 @@ def evaluate(cfg: DictConfig): "channel_dim": cfg.DATASET.channel_dim, "data_split": "test_32x32", }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 0, } diff --git a/examples/neuraloperator/train_uno.py b/examples/neuraloperator/train_uno.py index f73c2a1038..748c895a8a 100644 --- a/examples/neuraloperator/train_uno.py +++ b/examples/neuraloperator/train_uno.py @@ -68,11 +68,6 @@ def train(cfg: DictConfig): "channel_dim": cfg.DATASET.channel_dim, "data_split": "test_16x16", }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 0, } @@ -92,11 +87,6 @@ def train(cfg: DictConfig): "channel_dim": cfg.DATASET.channel_dim, "data_split": "test_32x32", }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 0, } @@ -195,11 +185,6 @@ def evaluate(cfg: DictConfig): "channel_dim": cfg.DATASET.channel_dim, "data_split": "test_16x16", }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 0, } @@ -219,11 +204,6 @@ def evaluate(cfg: DictConfig): "channel_dim": cfg.DATASET.channel_dim, "data_split": "test_32x32", }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, "batch_size": cfg.EVAL.batch_size, "num_workers": 0, } From 79322a327bd3d5c5eb5918587034bdfeae3c4501 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Fri, 7 Jun 2024 20:19:57 +0800 Subject: [PATCH 18/20] print log when reach the training max_steps --- ppsci/solver/solver.py | 2 ++ ppsci/solver/train.py | 12 ++++++++++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/ppsci/solver/solver.py b/ppsci/solver/solver.py index 70cd9acea0..53dcfd275b 100644 --- a/ppsci/solver/solver.py +++ b/ppsci/solver/solver.py @@ -513,6 +513,8 @@ def convert_expr( def train(self) -> None: """Training.""" self.global_step = self.best_metric["epoch"] * self.iters_per_epoch + self.max_steps = self.epochs * self.iters_per_epoch + start_epoch = self.best_metric["epoch"] + 1 if self.use_tbd and isinstance(self.cfg, DictConfig): diff --git a/ppsci/solver/train.py b/ppsci/solver/train.py index 1585621178..f2ede0938d 100644 --- a/ppsci/solver/train.py +++ b/ppsci/solver/train.py @@ -164,7 +164,11 @@ def train_epoch_func(solver: "solver.Solver", epoch_id: int, log_freq: int): solver.train_time_info["reader_cost"].update(reader_cost) solver.train_time_info["batch_cost"].update(batch_cost) printer.update_train_loss(solver, loss_dict, total_batch_size) - if solver.global_step % log_freq == 0 or solver.global_step == 1: + if ( + solver.global_step % 
log_freq == 0 + or solver.global_step == 1 + or solver.global_step == solver.max_steps + ): printer.log_train_info(solver, total_batch_size, epoch_id, iter_id) batch_tic = time.perf_counter() @@ -277,7 +281,11 @@ def closure() -> paddle.Tensor: solver.train_time_info["reader_cost"].update(reader_cost) solver.train_time_info["batch_cost"].update(batch_cost) printer.update_train_loss(solver, loss_dict, total_batch_size) - if solver.global_step % log_freq == 0 or solver.global_step == 1: + if ( + solver.global_step % log_freq == 0 + or solver.global_step == 1 + or solver.global_step == solver.max_steps + ): printer.log_train_info(solver, total_batch_size, epoch_id, iter_id) batch_tic = time.perf_counter() From f6312fe20fcbe31e9bbbdfea211a17040056534d Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Sat, 8 Jun 2024 00:53:32 +0800 Subject: [PATCH 19/20] add defaults config for all yaml files --- examples/NLS-MB/conf/NLS-MB_rogue_wave.yaml | 10 +--------- examples/NLS-MB/conf/NLS-MB_soliton.yaml | 10 +--------- examples/RegAE/conf/RegAE.yaml | 10 +--------- examples/allen_cahn/conf/allen_cahn.yaml | 20 +++++++++---------- .../conf/allen_cahn_causal_fourier_rwf.yaml | 11 +--------- .../conf/allen_cahn_defalut_ntk.yaml | 20 +++++++++---------- .../allen_cahn/conf/allen_cahn_default.yaml | 20 +++++++++---------- examples/allen_cahn/conf/allen_cahn_sota.yaml | 20 +++++++++---------- examples/amgnet/conf/amgnet_airfoil.yaml | 10 +--------- examples/amgnet/conf/amgnet_cylinder.yaml | 10 +--------- examples/aneurysm/conf/aneurysm.yaml | 11 +--------- examples/aneurysm/conf/aneurysm_flow.yaml | 10 +--------- examples/biharmonic2d/conf/biharmonic2d.yaml | 12 +---------- examples/bracket/conf/bracket.yaml | 10 +--------- examples/bubble/conf/bubble.yaml | 10 +--------- examples/cfdgcn/conf/cfdgcn.yaml | 10 +--------- examples/chip_heat/conf/chip_heat.yaml | 10 +--------- .../control_arm/conf/forward_analysis.yaml | 12 +---------- .../control_arm/conf/inverse_parameter.yaml | 12 +---------- .../conf/cylinder2d_unsteady_Re100.yaml | 10 +--------- .../transformer_physx/conf/enn.yaml | 19 +++++++++--------- .../transformer_physx/conf/transformer.yaml | 10 ++++++++++ examples/darcy/conf/darcy2d.yaml | 10 +--------- examples/deepcfd/conf/deepcfd.yaml | 10 +--------- examples/deephpms/conf/burgers.yaml | 10 +--------- examples/deephpms/conf/korteweg_de_vries.yaml | 10 +--------- .../deephpms/conf/kuramoto_sivashinsky.yaml | 10 +--------- examples/deephpms/conf/navier_stokes.yaml | 10 +--------- examples/deephpms/conf/schrodinger.yaml | 10 +--------- examples/dgmr/conf/dgmr.yaml | 10 +--------- .../conf/earthformer_enso_pretrain.yaml | 19 +++++++++--------- .../conf/earthformer_sevir_pretrain.yaml | 19 +++++++++--------- examples/epnn/conf/epnn.yaml | 10 +--------- examples/euler_beam/conf/euler_beam.yaml | 12 +---------- .../conf/fourcastnet_finetune.yaml | 10 +--------- .../fourcastnet/conf/fourcastnet_precip.yaml | 10 +--------- .../conf/fourcastnet_pretrain.yaml | 10 +--------- examples/fpde/conf/fractional_poisson_2d.yaml | 10 +--------- examples/fsi/conf/viv.yaml | 1 + examples/gpinn/conf/poisson_1d.yaml | 10 +--------- .../heat_exchanger/conf/heat_exchanger.yaml | 10 +--------- examples/heat_pinn/conf/heat_pinn.yaml | 10 +--------- examples/hpinns/conf/hpinns.yaml | 10 +--------- examples/ide/conf/volterra_ide.yaml | 10 +--------- examples/laplace/conf/laplace2d.yaml | 10 +--------- examples/ldc/conf/ldc2d_steady_Re10.yaml | 9 +-------- examples/ldc/conf/ldc2d_unsteady_Re10.yaml | 9 
+-------- examples/lorenz/conf/enn.yaml | 10 +--------- examples/lorenz/conf/transformer.yaml | 11 +--------- .../conf/sfno_swe_pretrain.yaml | 19 +++++++++--------- .../conf/tfno_darcyflow_pretrain.yaml | 19 +++++++++--------- .../conf/uno_darcyflow_pretrain.yaml | 19 +++++++++--------- examples/nowcastnet/conf/nowcastnet.yaml | 12 +---------- examples/nsfnet/conf/VP_NSFNet4.yaml | 10 +--------- examples/operator_learning/conf/deeponet.yaml | 10 +--------- examples/phycrnet/conf/burgers_equations.yaml | 10 +--------- .../conf/fitzhugh_nagumo_RD_equation.yaml | 10 +--------- .../conf/lambda_omega_RD_equation.yaml | 10 +--------- examples/phygeonet/conf/heat_equation.yaml | 10 +--------- .../phygeonet/conf/heat_equation_with_bc.yaml | 10 +--------- examples/phylstm/conf/phylstm2.yaml | 10 +--------- examples/phylstm/conf/phylstm3.yaml | 10 +--------- examples/pipe/conf/poiseuille_flow.yaml | 10 +--------- examples/rossler/conf/enn.yaml | 10 +--------- examples/rossler/conf/transformer.yaml | 1 + .../shock_wave/conf/shock_wave_Ma0.728.yaml | 12 +---------- .../shock_wave/conf/shock_wave_Ma2.0.yaml | 12 +---------- examples/tempoGAN/conf/tempogan.yaml | 12 +---------- examples/topopt/conf/topopt.yaml | 18 ++++++++++++++++- examples/yinglong1/conf/yinglong_12.yaml | 11 +--------- examples/yinglong1/conf/yinglong_24.yaml | 11 +--------- 71 files changed, 186 insertions(+), 627 deletions(-) diff --git a/examples/NLS-MB/conf/NLS-MB_rogue_wave.yaml b/examples/NLS-MB/conf/NLS-MB_rogue_wave.yaml index 1c14c3dd5e..98764d8908 100644 --- a/examples/NLS-MB/conf/NLS-MB_rogue_wave.yaml +++ b/examples/NLS-MB/conf/NLS-MB_rogue_wave.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/NLS-MB/conf/NLS-MB_soliton.yaml b/examples/NLS-MB/conf/NLS-MB_soliton.yaml index 23bc05376c..94eabfdffe 100644 --- a/examples/NLS-MB/conf/NLS-MB_soliton.yaml +++ b/examples/NLS-MB/conf/NLS-MB_soliton.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/RegAE/conf/RegAE.yaml b/examples/RegAE/conf/RegAE.yaml index d6b05e64ec..2f13c79cbd 100644 --- a/examples/RegAE/conf/RegAE.yaml +++ b/examples/RegAE/conf/RegAE.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - 
TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/allen_cahn/conf/allen_cahn.yaml b/examples/allen_cahn/conf/allen_cahn.yaml index c4cf52253f..4fbc18617d 100644 --- a/examples/allen_cahn/conf/allen_cahn.yaml +++ b/examples/allen_cahn/conf/allen_cahn.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,16 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - INFER.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/allen_cahn/conf/allen_cahn_causal_fourier_rwf.yaml b/examples/allen_cahn/conf/allen_cahn_causal_fourier_rwf.yaml index 8c5ffd2596..4e577f1500 100644 --- a/examples/allen_cahn/conf/allen_cahn_causal_fourier_rwf.yaml +++ b/examples/allen_cahn/conf/allen_cahn_causal_fourier_rwf.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,16 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - INFER.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/allen_cahn/conf/allen_cahn_defalut_ntk.yaml b/examples/allen_cahn/conf/allen_cahn_defalut_ntk.yaml index 0a4eac375c..7fe499eed3 100644 --- a/examples/allen_cahn/conf/allen_cahn_defalut_ntk.yaml +++ b/examples/allen_cahn/conf/allen_cahn_defalut_ntk.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,16 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working direcotry unchaned - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - INFER.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/allen_cahn/conf/allen_cahn_default.yaml b/examples/allen_cahn/conf/allen_cahn_default.yaml index 4ca236db7a..b5c451de0b 100644 --- a/examples/allen_cahn/conf/allen_cahn_default.yaml +++ b/examples/allen_cahn/conf/allen_cahn_default.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # 
dynamic output directory according to running time and override name @@ -5,16 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - INFER.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/allen_cahn/conf/allen_cahn_sota.yaml b/examples/allen_cahn/conf/allen_cahn_sota.yaml index 37a0ca1409..207c87d972 100644 --- a/examples/allen_cahn/conf/allen_cahn_sota.yaml +++ b/examples/allen_cahn/conf/allen_cahn_sota.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,16 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working direcotry unchaned - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - INFER.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/amgnet/conf/amgnet_airfoil.yaml b/examples/amgnet/conf/amgnet_airfoil.yaml index 56a96a5b37..c49f7cc632 100644 --- a/examples/amgnet/conf/amgnet_airfoil.yaml +++ b/examples/amgnet/conf/amgnet_airfoil.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/amgnet/conf/amgnet_cylinder.yaml b/examples/amgnet/conf/amgnet_cylinder.yaml index 7c3d74ebfa..cf2d5f1cb2 100644 --- a/examples/amgnet/conf/amgnet_cylinder.yaml +++ b/examples/amgnet/conf/amgnet_cylinder.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/aneurysm/conf/aneurysm.yaml b/examples/aneurysm/conf/aneurysm.yaml index 80f20d81f8..4181bbdcee 100644 --- a/examples/aneurysm/conf/aneurysm.yaml +++ b/examples/aneurysm/conf/aneurysm.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,16 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - 
- TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - INFER.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/aneurysm/conf/aneurysm_flow.yaml b/examples/aneurysm/conf/aneurysm_flow.yaml index a47361ab30..785bd02851 100644 --- a/examples/aneurysm/conf/aneurysm_flow.yaml +++ b/examples/aneurysm/conf/aneurysm_flow.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/biharmonic2d/conf/biharmonic2d.yaml b/examples/biharmonic2d/conf/biharmonic2d.yaml index c95037e8a6..8b4f2dab53 100644 --- a/examples/biharmonic2d/conf/biharmonic2d.yaml +++ b/examples/biharmonic2d/conf/biharmonic2d.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,17 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - INFER.pretrained_model_path - - INFER.export_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/bracket/conf/bracket.yaml b/examples/bracket/conf/bracket.yaml index e9fff24454..24ce3df3d9 100644 --- a/examples/bracket/conf/bracket.yaml +++ b/examples/bracket/conf/bracket.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/bubble/conf/bubble.yaml b/examples/bubble/conf/bubble.yaml index e4fb2196c1..bc1ef7c107 100644 --- a/examples/bubble/conf/bubble.yaml +++ b/examples/bubble/conf/bubble.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/cfdgcn/conf/cfdgcn.yaml b/examples/cfdgcn/conf/cfdgcn.yaml index c6e29cb9ea..912497cda2 100644 --- a/examples/cfdgcn/conf/cfdgcn.yaml +++ b/examples/cfdgcn/conf/cfdgcn.yaml @@ -5,6 +5,7 @@ 
defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/chip_heat/conf/chip_heat.yaml b/examples/chip_heat/conf/chip_heat.yaml index 876593f685..edd1dd0eba 100644 --- a/examples/chip_heat/conf/chip_heat.yaml +++ b/examples/chip_heat/conf/chip_heat.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/control_arm/conf/forward_analysis.yaml b/examples/control_arm/conf/forward_analysis.yaml index ec7d84383c..bf1ee7b20a 100644 --- a/examples/control_arm/conf/forward_analysis.yaml +++ b/examples/control_arm/conf/forward_analysis.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,17 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - INFER.pretrained_model_path - - INFER.export_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/control_arm/conf/inverse_parameter.yaml b/examples/control_arm/conf/inverse_parameter.yaml index a1a7c0e884..7127d1454c 100644 --- a/examples/control_arm/conf/inverse_parameter.yaml +++ b/examples/control_arm/conf/inverse_parameter.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,17 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - INFER.pretrained_model_path - - INFER.export_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/cylinder/2d_unsteady/conf/cylinder2d_unsteady_Re100.yaml b/examples/cylinder/2d_unsteady/conf/cylinder2d_unsteady_Re100.yaml index 89530247b6..d4c672a31f 100644 --- a/examples/cylinder/2d_unsteady/conf/cylinder2d_unsteady_Re100.yaml +++ b/examples/cylinder/2d_unsteady/conf/cylinder2d_unsteady_Re100.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: 
name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/cylinder/2d_unsteady/transformer_physx/conf/enn.yaml b/examples/cylinder/2d_unsteady/transformer_physx/conf/enn.yaml index 8a44e22345..363bac8a25 100644 --- a/examples/cylinder/2d_unsteady/transformer_physx/conf/enn.yaml +++ b/examples/cylinder/2d_unsteady/transformer_physx/conf/enn.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/cylinder/2d_unsteady/transformer_physx/conf/transformer.yaml b/examples/cylinder/2d_unsteady/transformer_physx/conf/transformer.yaml index 819142ce10..02038ad1a6 100644 --- a/examples/cylinder/2d_unsteady/transformer_physx/conf/transformer.yaml +++ b/examples/cylinder/2d_unsteady/transformer_physx/conf/transformer.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name diff --git a/examples/darcy/conf/darcy2d.yaml b/examples/darcy/conf/darcy2d.yaml index 408659b1a1..352c81a161 100644 --- a/examples/darcy/conf/darcy2d.yaml +++ b/examples/darcy/conf/darcy2d.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/deepcfd/conf/deepcfd.yaml b/examples/deepcfd/conf/deepcfd.yaml index 0673aee3fe..e13b3dcc07 100644 --- a/examples/deepcfd/conf/deepcfd.yaml +++ b/examples/deepcfd/conf/deepcfd.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git 
a/examples/deephpms/conf/burgers.yaml b/examples/deephpms/conf/burgers.yaml index ae96c44432..cbaaa5e8b8 100644 --- a/examples/deephpms/conf/burgers.yaml +++ b/examples/deephpms/conf/burgers.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/deephpms/conf/korteweg_de_vries.yaml b/examples/deephpms/conf/korteweg_de_vries.yaml index d40958371e..750e0e29df 100644 --- a/examples/deephpms/conf/korteweg_de_vries.yaml +++ b/examples/deephpms/conf/korteweg_de_vries.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/deephpms/conf/kuramoto_sivashinsky.yaml b/examples/deephpms/conf/kuramoto_sivashinsky.yaml index 23fed5fb31..e0b3fc312a 100644 --- a/examples/deephpms/conf/kuramoto_sivashinsky.yaml +++ b/examples/deephpms/conf/kuramoto_sivashinsky.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/deephpms/conf/navier_stokes.yaml b/examples/deephpms/conf/navier_stokes.yaml index 604408a9d2..c8cf10519e 100644 --- a/examples/deephpms/conf/navier_stokes.yaml +++ b/examples/deephpms/conf/navier_stokes.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/deephpms/conf/schrodinger.yaml b/examples/deephpms/conf/schrodinger.yaml index 609c7af029..7cb270d7f5 100644 --- a/examples/deephpms/conf/schrodinger.yaml +++ b/examples/deephpms/conf/schrodinger.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: 
name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/dgmr/conf/dgmr.yaml b/examples/dgmr/conf/dgmr.yaml index 2d184f20b3..00b4b224dd 100644 --- a/examples/dgmr/conf/dgmr.yaml +++ b/examples/dgmr/conf/dgmr.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/earthformer/conf/earthformer_enso_pretrain.yaml b/examples/earthformer/conf/earthformer_enso_pretrain.yaml index 8f0919a5db..541b96d529 100644 --- a/examples/earthformer/conf/earthformer_enso_pretrain.yaml +++ b/examples/earthformer/conf/earthformer_enso_pretrain.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/earthformer/conf/earthformer_sevir_pretrain.yaml b/examples/earthformer/conf/earthformer_sevir_pretrain.yaml index 7bff22e884..29d31e9e1a 100644 --- a/examples/earthformer/conf/earthformer_sevir_pretrain.yaml +++ b/examples/earthformer/conf/earthformer_sevir_pretrain.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/epnn/conf/epnn.yaml b/examples/epnn/conf/epnn.yaml index c90a4da315..56a78ee068 100644 --- a/examples/epnn/conf/epnn.yaml +++ b/examples/epnn/conf/epnn.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - 
override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/euler_beam/conf/euler_beam.yaml b/examples/euler_beam/conf/euler_beam.yaml index 4bf37a1cf8..6827f22514 100644 --- a/examples/euler_beam/conf/euler_beam.yaml +++ b/examples/euler_beam/conf/euler_beam.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,17 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - INFER.pretrained_model_path - - INFER.export_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/fourcastnet/conf/fourcastnet_finetune.yaml b/examples/fourcastnet/conf/fourcastnet_finetune.yaml index b5f638355f..b3300adc51 100644 --- a/examples/fourcastnet/conf/fourcastnet_finetune.yaml +++ b/examples/fourcastnet/conf/fourcastnet_finetune.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/fourcastnet/conf/fourcastnet_precip.yaml b/examples/fourcastnet/conf/fourcastnet_precip.yaml index 1870858bb3..c8134f67a3 100644 --- a/examples/fourcastnet/conf/fourcastnet_precip.yaml +++ b/examples/fourcastnet/conf/fourcastnet_precip.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/fourcastnet/conf/fourcastnet_pretrain.yaml b/examples/fourcastnet/conf/fourcastnet_pretrain.yaml index 20edfd8942..b8dd24664f 100644 --- a/examples/fourcastnet/conf/fourcastnet_pretrain.yaml +++ b/examples/fourcastnet/conf/fourcastnet_pretrain.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git 
a/examples/fpde/conf/fractional_poisson_2d.yaml b/examples/fpde/conf/fractional_poisson_2d.yaml index c0b657b82c..9a0564d6e3 100644 --- a/examples/fpde/conf/fractional_poisson_2d.yaml +++ b/examples/fpde/conf/fractional_poisson_2d.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/fsi/conf/viv.yaml b/examples/fsi/conf/viv.yaml index 566769d211..56037e231f 100644 --- a/examples/fsi/conf/viv.yaml +++ b/examples/fsi/conf/viv.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: diff --git a/examples/gpinn/conf/poisson_1d.yaml b/examples/gpinn/conf/poisson_1d.yaml index 6c4083e461..3125b8d08d 100644 --- a/examples/gpinn/conf/poisson_1d.yaml +++ b/examples/gpinn/conf/poisson_1d.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/heat_exchanger/conf/heat_exchanger.yaml b/examples/heat_exchanger/conf/heat_exchanger.yaml index 99e760613c..76d6483514 100644 --- a/examples/heat_exchanger/conf/heat_exchanger.yaml +++ b/examples/heat_exchanger/conf/heat_exchanger.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/heat_pinn/conf/heat_pinn.yaml b/examples/heat_pinn/conf/heat_pinn.yaml index a194591b00..1b97c89525 100644 --- a/examples/heat_pinn/conf/heat_pinn.yaml +++ b/examples/heat_pinn/conf/heat_pinn.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/hpinns/conf/hpinns.yaml b/examples/hpinns/conf/hpinns.yaml index
c8e9e18f28..71a92cc367 100644 --- a/examples/hpinns/conf/hpinns.yaml +++ b/examples/hpinns/conf/hpinns.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/ide/conf/volterra_ide.yaml b/examples/ide/conf/volterra_ide.yaml index 590882e3a2..cda361ea59 100644 --- a/examples/ide/conf/volterra_ide.yaml +++ b/examples/ide/conf/volterra_ide.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/laplace/conf/laplace2d.yaml b/examples/laplace/conf/laplace2d.yaml index c6dc16e4bb..20591a6f36 100644 --- a/examples/laplace/conf/laplace2d.yaml +++ b/examples/laplace/conf/laplace2d.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/ldc/conf/ldc2d_steady_Re10.yaml b/examples/ldc/conf/ldc2d_steady_Re10.yaml index ed68eaa7af..cd877a47fc 100644 --- a/examples/ldc/conf/ldc2d_steady_Re10.yaml +++ b/examples/ldc/conf/ldc2d_steady_Re10.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,14 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/ldc/conf/ldc2d_unsteady_Re10.yaml b/examples/ldc/conf/ldc2d_unsteady_Re10.yaml index 3b766dee87..acdd9c0bd9 100644 --- a/examples/ldc/conf/ldc2d_unsteady_Re10.yaml +++ b/examples/ldc/conf/ldc2d_unsteady_Re10.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,14 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - 
TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/lorenz/conf/enn.yaml b/examples/lorenz/conf/enn.yaml index f2199623bf..b3c6cd8113 100644 --- a/examples/lorenz/conf/enn.yaml +++ b/examples/lorenz/conf/enn.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/lorenz/conf/transformer.yaml b/examples/lorenz/conf/transformer.yaml index 93f4c39134..ee3a206420 100644 --- a/examples/lorenz/conf/transformer.yaml +++ b/examples/lorenz/conf/transformer.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,16 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq - - EMBEDDING_MODEL_PATH callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/neuraloperator/conf/sfno_swe_pretrain.yaml b/examples/neuraloperator/conf/sfno_swe_pretrain.yaml index 09ce6522b3..f0a8b811f0 100644 --- a/examples/neuraloperator/conf/sfno_swe_pretrain.yaml +++ b/examples/neuraloperator/conf/sfno_swe_pretrain.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/neuraloperator/conf/tfno_darcyflow_pretrain.yaml b/examples/neuraloperator/conf/tfno_darcyflow_pretrain.yaml index b6f7aafd51..104cb6d78f 100644 --- a/examples/neuraloperator/conf/tfno_darcyflow_pretrain.yaml +++ b/examples/neuraloperator/conf/tfno_darcyflow_pretrain.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir
- - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/neuraloperator/conf/uno_darcyflow_pretrain.yaml b/examples/neuraloperator/conf/uno_darcyflow_pretrain.yaml index 72f20b4106..25cfab256d 100644 --- a/examples/neuraloperator/conf/uno_darcyflow_pretrain.yaml +++ b/examples/neuraloperator/conf/uno_darcyflow_pretrain.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory according to running time and override name @@ -5,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/nowcastnet/conf/nowcastnet.yaml b/examples/nowcastnet/conf/nowcastnet.yaml index 0f99bfc0fd..52b72b0f36 100644 --- a/examples/nowcastnet/conf/nowcastnet.yaml +++ b/examples/nowcastnet/conf/nowcastnet.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,17 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - INFER.pretrained_model_path - - INFER.export_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/nsfnet/conf/VP_NSFNet4.yaml b/examples/nsfnet/conf/VP_NSFNet4.yaml index 937d00f5b0..ddc99a783e 100644 --- a/examples/nsfnet/conf/VP_NSFNet4.yaml +++ b/examples/nsfnet/conf/VP_NSFNet4.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/operator_learning/conf/deeponet.yaml b/examples/operator_learning/conf/deeponet.yaml index f5fbe01990..dbcb0d5b9b 100644 --- a/examples/operator_learning/conf/deeponet.yaml +++ b/examples/operator_learning/conf/deeponet.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/phycrnet/conf/burgers_equations.yaml
b/examples/phycrnet/conf/burgers_equations.yaml index 8510ce7fed..6ad02ba784 100644 --- a/examples/phycrnet/conf/burgers_equations.yaml +++ b/examples/phycrnet/conf/burgers_equations.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/phycrnet/conf/fitzhugh_nagumo_RD_equation.yaml b/examples/phycrnet/conf/fitzhugh_nagumo_RD_equation.yaml index b1a940a9a4..0317036179 100644 --- a/examples/phycrnet/conf/fitzhugh_nagumo_RD_equation.yaml +++ b/examples/phycrnet/conf/fitzhugh_nagumo_RD_equation.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/phycrnet/conf/lambda_omega_RD_equation.yaml b/examples/phycrnet/conf/lambda_omega_RD_equation.yaml index bfcaab7451..ad517c035e 100644 --- a/examples/phycrnet/conf/lambda_omega_RD_equation.yaml +++ b/examples/phycrnet/conf/lambda_omega_RD_equation.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/phygeonet/conf/heat_equation.yaml b/examples/phygeonet/conf/heat_equation.yaml index dcdacdd93c..991a0fbddb 100644 --- a/examples/phygeonet/conf/heat_equation.yaml +++ b/examples/phygeonet/conf/heat_equation.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/phygeonet/conf/heat_equation_with_bc.yaml b/examples/phygeonet/conf/heat_equation_with_bc.yaml index f18cbeff90..136b95879a 100644 --- a/examples/phygeonet/conf/heat_equation_with_bc.yaml +++ b/examples/phygeonet/conf/heat_equation_with_bc.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - 
hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/phylstm/conf/phylstm2.yaml b/examples/phylstm/conf/phylstm2.yaml index 07f636380a..b3dc1ddd6c 100644 --- a/examples/phylstm/conf/phylstm2.yaml +++ b/examples/phylstm/conf/phylstm2.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/phylstm/conf/phylstm3.yaml b/examples/phylstm/conf/phylstm3.yaml index d211f034e1..67326c3aa0 100644 --- a/examples/phylstm/conf/phylstm3.yaml +++ b/examples/phylstm/conf/phylstm3.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/pipe/conf/poiseuille_flow.yaml b/examples/pipe/conf/poiseuille_flow.yaml index 7fe9a161da..6329081ca0 100644 --- a/examples/pipe/conf/poiseuille_flow.yaml +++ b/examples/pipe/conf/poiseuille_flow.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/rossler/conf/enn.yaml b/examples/rossler/conf/enn.yaml index 3f06741828..c2ab876b3f 100644 --- a/examples/rossler/conf/enn.yaml +++ b/examples/rossler/conf/enn.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,15 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/rossler/conf/transformer.yaml b/examples/rossler/conf/transformer.yaml index 
1a8386c0d7..92829a2181 100644 --- a/examples/rossler/conf/transformer.yaml +++ b/examples/rossler/conf/transformer.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: diff --git a/examples/shock_wave/conf/shock_wave_Ma0.728.yaml b/examples/shock_wave/conf/shock_wave_Ma0.728.yaml index 79ff84a127..d4ca2c1c05 100644 --- a/examples/shock_wave/conf/shock_wave_Ma0.728.yaml +++ b/examples/shock_wave/conf/shock_wave_Ma0.728.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,17 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - INFER.pretrained_model_path - - INFER.export_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/shock_wave/conf/shock_wave_Ma2.0.yaml b/examples/shock_wave/conf/shock_wave_Ma2.0.yaml index a23cc6b25b..0051030bf8 100644 --- a/examples/shock_wave/conf/shock_wave_Ma2.0.yaml +++ b/examples/shock_wave/conf/shock_wave_Ma2.0.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,17 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - INFER.pretrained_model_path - - INFER.export_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/tempoGAN/conf/tempogan.yaml b/examples/tempoGAN/conf/tempogan.yaml index b23000dba8..b329f3c8c0 100644 --- a/examples/tempoGAN/conf/tempogan.yaml +++ b/examples/tempoGAN/conf/tempogan.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -14,17 +15,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - INFER.pretrained_model_path - - INFER.export_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/topopt/conf/topopt.yaml b/examples/topopt/conf/topopt.yaml index b38ab20edf..8642ec4646 100644 --- a/examples/topopt/conf/topopt.yaml +++ b/examples/topopt/conf/topopt.yaml @@ -1,3 +1,13 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + hydra: run: # dynamic output directory @@ -70,7 +80,13 @@ EVAL: # inference settings INFER: pretrained_model_name: null # a string, indicating which model you want to export. Support [Uniform, Poisson5, Poisson10, Poisson30]. 
- pretrained_model_path_dict: {'Uniform': 'https://paddle-org.bj.bcebos.com/paddlescience/models/topopt/uniform_pretrained.pdparams', 'Poisson5': 'https://paddle-org.bj.bcebos.com/paddlescience/models/topopt/poisson5_pretrained.pdparams', 'Poisson10': 'https://paddle-org.bj.bcebos.com/paddlescience/models/topopt/poisson10_pretrained.pdparams', 'Poisson30': 'https://paddle-org.bj.bcebos.com/paddlescience/models/topopt/poisson30_pretrained.pdparams'} + pretrained_model_path_dict: + { + "Uniform": "https://paddle-org.bj.bcebos.com/paddlescience/models/topopt/uniform_pretrained.pdparams", + "Poisson5": "https://paddle-org.bj.bcebos.com/paddlescience/models/topopt/poisson5_pretrained.pdparams", + "Poisson10": "https://paddle-org.bj.bcebos.com/paddlescience/models/topopt/poisson10_pretrained.pdparams", + "Poisson30": "https://paddle-org.bj.bcebos.com/paddlescience/models/topopt/poisson30_pretrained.pdparams", + } export_path: ./inference/topopt_${INFER.pretrained_model_name} pdmodel_path: ${INFER.export_path}.pdmodel pdiparams_path: ${INFER.export_path}.pdiparams diff --git a/examples/yinglong1/conf/yinglong_12.yaml b/examples/yinglong1/conf/yinglong_12.yaml index b0e307e080..dc6140073b 100644 --- a/examples/yinglong1/conf/yinglong_12.yaml +++ b/examples/yinglong1/conf/yinglong_12.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -15,16 +16,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - INFER.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback diff --git a/examples/yinglong1/conf/yinglong_24.yaml b/examples/yinglong1/conf/yinglong_24.yaml index 20c635bf50..7f187e933d 100644 --- a/examples/yinglong1/conf/yinglong_24.yaml +++ b/examples/yinglong1/conf/yinglong_24.yaml @@ -5,6 +5,7 @@ defaults: - TRAIN/swa: swa_default - EVAL: eval_default - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - _self_ hydra: @@ -15,16 +16,6 @@ hydra: job: name: ${mode} # name of logfile chdir: false # keep current working directory unchanged - config: - override_dirname: - exclude_keys: - - TRAIN.checkpoint_path - - TRAIN.pretrained_model_path - - EVAL.pretrained_model_path - - INFER.pretrained_model_path - - mode - - output_dir - - log_freq callbacks: init_callback: _target_: ppsci.utils.callbacks.InitCallback From 67a315566cacc3dd38990054b99cb7adda8075e9 Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Sat, 15 Jun 2024 13:26:37 +0800 Subject: [PATCH 20/20] simplify more code --- examples/aneurysm/aneurysm.py | 1 - examples/aneurysm/aneurysm_flow.py | 1 - examples/bracket/bracket.py | 1 - examples/earthformer/earthformer_enso_train.py | 7 ++----- examples/fourcastnet/train_pretrain.py | 1 - examples/lorenz/train_enn.py | 13 ------------- examples/lorenz/train_transformer.py | 5 ----- examples/neuraloperator/train_sfno.py | 2 -- examples/neuraloperator/train_tfno.py | 2 -- examples/neuraloperator/train_uno.py | 2 -- examples/topopt/topopt.py | 1 - 11 files changed, 2 insertions(+), 34 deletions(-) diff --git a/examples/aneurysm/aneurysm.py b/examples/aneurysm/aneurysm.py index a84f89cd38..0a87d4728b 100644 --- a/examples/aneurysm/aneurysm.py 
+++ b/examples/aneurysm/aneurysm.py @@ -52,7 +52,6 @@ def train(cfg: DictConfig): "drop_last": True, "shuffle": True, }, - "num_workers": 1, } # set constraint diff --git a/examples/aneurysm/aneurysm_flow.py b/examples/aneurysm/aneurysm_flow.py index d69fa38aba..3595521422 100644 --- a/examples/aneurysm/aneurysm_flow.py +++ b/examples/aneurysm/aneurysm_flow.py @@ -177,7 +177,6 @@ def output_transform_p(self, in_, out): geom=geom["interior"], dataloader_cfg={ "dataset": "NamedArrayDataset", - "num_workers": 1, "batch_size": cfg.TRAIN.batch_size, "iters_per_epoch": int(x.shape[0] / cfg.TRAIN.batch_size), "sampler": { diff --git a/examples/bracket/bracket.py b/examples/bracket/bracket.py index de98c581d9..4528046f79 100644 --- a/examples/bracket/bracket.py +++ b/examples/bracket/bracket.py @@ -59,7 +59,6 @@ def train(cfg: DictConfig): "drop_last": True, "shuffle": True, }, - "num_workers": 1, } # set constraint diff --git a/examples/earthformer/earthformer_enso_train.py b/examples/earthformer/earthformer_enso_train.py index 120654c704..ea0bd2c5d3 100644 --- a/examples/earthformer/earthformer_enso_train.py +++ b/examples/earthformer/earthformer_enso_train.py @@ -131,11 +131,9 @@ def train(cfg: DictConfig): constraint, cfg.output_dir, optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - ITERS_PER_EPOCH, + epochs=cfg.TRAIN.epochs, + iters_per_epoch=ITERS_PER_EPOCH, eval_during_train=cfg.TRAIN.eval_during_train, - seed=cfg.seed, validator=validator, compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, eval_with_no_grad=cfg.EVAL.eval_with_no_grad, @@ -185,7 +183,6 @@ def evaluate(cfg: DictConfig): model, output_dir=cfg.output_dir, log_freq=cfg.log_freq, - seed=cfg.seed, validator=validator, pretrained_model_path=cfg.EVAL.pretrained_model_path, compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, diff --git a/examples/fourcastnet/train_pretrain.py b/examples/fourcastnet/train_pretrain.py index c159d93308..4c7cce9766 100644 --- a/examples/fourcastnet/train_pretrain.py +++ b/examples/fourcastnet/train_pretrain.py @@ -158,7 +158,6 @@ def train(cfg: DictConfig): cfg.TRAIN.epochs, ITERS_PER_EPOCH, eval_during_train=True, - seed=cfg.seed, validator=validator, compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, eval_with_no_grad=cfg.EVAL.eval_with_no_grad, diff --git a/examples/lorenz/train_enn.py b/examples/lorenz/train_enn.py index 0a31c2fcd3..c7c40a6af4 100644 --- a/examples/lorenz/train_enn.py +++ b/examples/lorenz/train_enn.py @@ -18,15 +18,12 @@ # This file is for step1: training an embedding model. # This file is based on PaddleScience/ppsci API.
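For context on the deletions in this second patch: the seed fixing and logger initialization removed from each training script are now performed once by the InitCallback registered under hydra.callbacks in the configs patched above. A minimal sketch of what such a job-start callback presumably does, reconstructed from the very lines being deleted below (the class body and method here are assumptions based on Hydra's callback API, not ppsci's actual implementation):

    # Hypothetical sketch of a Hydra job-start callback centralizing the
    # per-example boilerplate deleted in this patch. The config fields
    # (seed, output_dir, mode) mirror the removed code; the rest is assumed.
    import os.path as osp

    from hydra.experimental.callback import Callback
    from omegaconf import DictConfig

    import ppsci
    from ppsci.utils import logger


    class InitCallback(Callback):
        def on_job_start(self, config: DictConfig, **kwargs) -> None:
            # set random seed for reproducibility (previously repeated in every example)
            ppsci.utils.misc.set_random_seed(config.seed)
            # initialize logger once, into the job's output directory
            logger.init_logger(
                "ppsci", osp.join(config.output_dir, f"{config.mode}.log"), "info"
            )

With the callback in place, each example's train/evaluate function can begin directly with model and constraint construction, which is what the hunks below reduce to.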
-from os import path as osp - import hydra import numpy as np import paddle from omegaconf import DictConfig import ppsci -from ppsci.utils import logger def get_mean_std(data: np.ndarray): @@ -40,11 +37,6 @@ def get_mean_std(data: np.ndarray): def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - weights = (1.0 * (cfg.TRAIN_BLOCK_SIZE - 1), 1.0e4 * cfg.TRAIN_BLOCK_SIZE) regularization_key = "k_matrix" # manually build constraint(s) @@ -151,11 +143,6 @@ def train(cfg: DictConfig): def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - weights = (1.0 * (cfg.TRAIN_BLOCK_SIZE - 1), 1.0e4 * cfg.TRAIN_BLOCK_SIZE) regularization_key = "k_matrix" # manually build constraint(s) diff --git a/examples/lorenz/train_transformer.py b/examples/lorenz/train_transformer.py index af2ce55738..944add9663 100644 --- a/examples/lorenz/train_transformer.py +++ b/examples/lorenz/train_transformer.py @@ -56,11 +56,6 @@ def train(cfg: DictConfig): # train time-series: 2048 time-steps: 256 block-size: 64 stride: 64 # valid time-series: 64 time-steps: 1024 block-size: 256 stride: 1024 # test time-series: 256 time-steps: 1024 - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - embedding_model = build_embedding_model(cfg.EMBEDDING_MODEL_PATH) output_transform = OutputTransform(embedding_model) diff --git a/examples/neuraloperator/train_sfno.py b/examples/neuraloperator/train_sfno.py index f066c56e04..92fe4ad52c 100644 --- a/examples/neuraloperator/train_sfno.py +++ b/examples/neuraloperator/train_sfno.py @@ -130,7 +130,6 @@ def train(cfg: DictConfig): cfg.TRAIN.epochs, ITERS_PER_EPOCH, eval_during_train=cfg.TRAIN.eval_during_train, - seed=cfg.seed, validator=validator, compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, eval_with_no_grad=cfg.EVAL.eval_with_no_grad, @@ -203,7 +202,6 @@ def evaluate(cfg: DictConfig): model, output_dir=cfg.output_dir, log_freq=cfg.log_freq, - seed=cfg.seed, validator=validator, pretrained_model_path=cfg.EVAL.pretrained_model_path, compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, diff --git a/examples/neuraloperator/train_tfno.py b/examples/neuraloperator/train_tfno.py index 2101dbafff..90c33b4615 100644 --- a/examples/neuraloperator/train_tfno.py +++ b/examples/neuraloperator/train_tfno.py @@ -156,7 +156,6 @@ def train(cfg: DictConfig): cfg.TRAIN.epochs, ITERS_PER_EPOCH, eval_during_train=cfg.TRAIN.eval_during_train, - seed=cfg.seed, validator=validator, compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, eval_with_no_grad=cfg.EVAL.eval_with_no_grad, @@ -251,7 +250,6 @@ def evaluate(cfg: DictConfig): model, output_dir=cfg.output_dir, log_freq=cfg.log_freq, - seed=cfg.seed, validator=validator, pretrained_model_path=cfg.EVAL.pretrained_model_path, compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, diff --git a/examples/neuraloperator/train_uno.py b/examples/neuraloperator/train_uno.py index 748c895a8a..9770bfbb77 100644 --- a/examples/neuraloperator/train_uno.py +++ b/examples/neuraloperator/train_uno.py @@ -156,7 +156,6 @@ def train(cfg: DictConfig): cfg.TRAIN.epochs, ITERS_PER_EPOCH, 
eval_during_train=cfg.TRAIN.eval_during_train, - seed=cfg.seed, validator=validator, compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, eval_with_no_grad=cfg.EVAL.eval_with_no_grad, @@ -251,7 +250,6 @@ def evaluate(cfg: DictConfig): model, output_dir=cfg.output_dir, log_freq=cfg.log_freq, - seed=cfg.seed, validator=validator, pretrained_model_path=cfg.EVAL.pretrained_model_path, compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, diff --git a/examples/topopt/topopt.py b/examples/topopt/topopt.py index 8807e03d72..cb579dd2e7 100644 --- a/examples/topopt/topopt.py +++ b/examples/topopt/topopt.py @@ -224,7 +224,6 @@ def evaluate_model( "drop_last": False, "shuffle": True, }, - "num_workers": 0, }, ppsci.loss.FunctionalLoss(loss_wrapper(cfg)), {"output": lambda out: out["output"]},
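A closing note on the recurring yaml change in the first patch: every inline hydra.job.config.override_dirname.exclude_keys list is replaced by the shared config-group default hydra/job/config/override_dirname/exclude_keys: exclude_keys_default, so the exclusion list is maintained in one place instead of being copied into dozens of example configs. The shared group file itself is not among the hunks shown here; judging from the per-example lists it replaces, it presumably covers the keys that recur across the examples, along the lines of:

    # exclude_keys_default.yaml -- assumed contents, inferred from the
    # per-example exclude_keys lists deleted in this patch
    - TRAIN.checkpoint_path
    - TRAIN.pretrained_model_path
    - EVAL.pretrained_model_path
    - INFER.pretrained_model_path
    - INFER.export_path
    - mode
    - output_dir
    - log_freq

Because these keys are dropped before Hydra composes hydra.job.override_dirname, an invocation such as python burgers.py mode=train TRAIN.epochs=100 TRAIN.checkpoint_path=./ckpt still yields the override dirname TRAIN.epochs=100, so output directory names are unchanged by the refactor.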