diff --git a/checkpoint b/checkpoint
deleted file mode 100644
index aa6f74047b..0000000000
--- a/checkpoint
+++ /dev/null
@@ -1 +0,0 @@
-model.ckpt-1.pt
diff --git a/deepmd/dpmodel/fitting/property_fitting.py b/deepmd/dpmodel/fitting/property_fitting.py
index 2df4b75494..ae82fba7af 100644
--- a/deepmd/dpmodel/fitting/property_fitting.py
+++ b/deepmd/dpmodel/fitting/property_fitting.py
@@ -41,10 +41,6 @@ class PropertyFittingNet(InvarFitting):
         this list is of length :math:`N_l + 1`, specifying if the hidden layers and the output layer are trainable.
     intensive
         Whether the fitting property is intensive.
-    bias_method
-        The method of applying the bias to each atomic output, user can select 'normal' or 'no_bias'.
-        If 'normal' is used, the computed bias will be added to the atomic output.
-        If 'no_bias' is used, no bias will be added to the atomic output.
     resnet_dt
         Time-step `dt` in the resnet construction: :math:`y = x + dt * \phi (Wx + b)`
@@ -74,7 +70,6 @@ def __init__(
         rcond: Optional[float] = None,
         trainable: Union[bool, list[bool]] = True,
         intensive: bool = False,
-        bias_method: str = "normal",
         property_name: Union[str, list] = "property",
         resnet_dt: bool = True,
         numb_fparam: int = 0,
@@ -90,7 +85,6 @@ def __init__(
     ) -> None:
         self.task_dim = task_dim
         self.intensive = intensive
-        self.bias_method = bias_method
         self.property_name = property_name
         super().__init__(
             var_name="property",
diff --git a/deepmd/dpmodel/output_def.py b/deepmd/dpmodel/output_def.py
index f525b67c4d..e3f79f654f 100644
--- a/deepmd/dpmodel/output_def.py
+++ b/deepmd/dpmodel/output_def.py
@@ -5,6 +5,7 @@
 )
 from typing import (
+    Optional,
     Union,
 )
@@ -200,7 +201,7 @@ def __init__(
         r_hessian: bool = False,
         magnetic: bool = False,
         intensive: bool = False,
-        sub_var_name: Union[list[str], str, None] = None,
+        sub_var_name: Optional[Union[list[str], str]] = None,
     ) -> None:
         self.name = name
         self.shape = list(shape)
diff --git a/deepmd/entrypoints/test.py b/deepmd/entrypoints/test.py
index 105b28c709..e906825fcf 100644
--- a/deepmd/entrypoints/test.py
+++ b/deepmd/entrypoints/test.py
@@ -842,13 +842,15 @@ def test_property(
     concat_aproperty = []
     for name, dim in zip(property_name, property_dim):
         test_data[name] = test_data[name].reshape([numb_test, dim])
-        test_data[f"atom_{name}"] = test_data[f"atom_{name}"].reshape(
-            [numb_test, natoms * dim]
-        )
         concat_property.append(test_data[name])
-        concat_aproperty.append(test_data[f"atom_{name}"])
+        if has_atom_property:
+            test_data[f"atom_{name}"] = test_data[f"atom_{name}"].reshape(
+                [numb_test, natoms * dim]
+            )
+            concat_aproperty.append(test_data[f"atom_{name}"])
     test_data["property"] = np.concatenate(concat_property, axis=1)
-    test_data["atom_property"] = np.concatenate(concat_aproperty, axis=1)
+    if has_atom_property:
+        test_data["atom_property"] = np.concatenate(concat_aproperty, axis=1)
 
     diff_property = property - test_data["property"][:numb_test]
     mae_property = mae(diff_property)
diff --git a/deepmd/infer/deep_property.py b/deepmd/infer/deep_property.py
index f4dd6d8b3f..e2d207fc74 100644
--- a/deepmd/infer/deep_property.py
+++ b/deepmd/infer/deep_property.py
@@ -142,11 +142,11 @@ def get_intensive(self) -> bool:
         return self.deep_eval.get_intensive()
 
     def get_property_name(self) -> Union[list[str], str]:
-        """Get the name of the property."""
+        """Get the names of the properties."""
         return self.deep_eval.get_property_name()
 
     def get_property_dim(self) -> Union[list[int], int]:
-        """Get the dimension of the property."""
+        """Get the dimensions of the properties."""
         return self.deep_eval.get_property_dim()
diff --git a/deepmd/pt/loss/property.py b/deepmd/pt/loss/property.py
index 0de91295dc..46ffc7153d 100644
--- a/deepmd/pt/loss/property.py
+++ b/deepmd/pt/loss/property.py
@@ -24,11 +24,11 @@ class PropertyLoss(TaskLoss):
     def __init__(
         self,
         task_dim,
+        property_name: Union[str, list],
+        property_dim: Union[int, list],
         loss_func: str = "smooth_mae",
         metric: list = ["mae"],
         beta: float = 1.00,
-        property_name: Union[str, list] = "property",
-        property_dim: Union[int, list] = 1,
         out_bias: Union[list, None] = None,
         out_std: Union[list, None] = None,
         **kwargs,
@@ -57,6 +57,7 @@ def __init__(
             property_dim = [property_dim]
         self.property_name = property_name
         assert self.task_dim == sum(property_dim)
+        assert len(property_name) == len(property_dim), f"`property_name` and `property_dim` must have the same length, but got {property_name} and {property_dim}!"
         self.property_name_dim_mapping = dict(zip(property_name, property_dim))
         self.out_bias = out_bias
         self.out_std = out_std
diff --git a/deepmd/pt/model/model/__init__.py b/deepmd/pt/model/model/__init__.py
index 491a524da8..770c4406d0 100644
--- a/deepmd/pt/model/model/__init__.py
+++ b/deepmd/pt/model/model/__init__.py
@@ -98,6 +98,11 @@ def _get_standard_model_components(model_params, ntypes):
         fitting_net["out_dim"] = descriptor.get_dim_emb()
     if "ener" in fitting_net["type"]:
         fitting_net["return_energy"] = True
+    if "property" in fitting_net["type"]:
+        if isinstance(fitting_net["property_dim"], list):
+            fitting_net["task_dim"] = sum(fitting_net["property_dim"])
+        else:
+            fitting_net["task_dim"] = fitting_net["property_dim"]
     fitting = BaseFitting(**fitting_net)
     return descriptor, fitting, fitting_net["type"]
diff --git a/deepmd/pt/model/task/property.py b/deepmd/pt/model/task/property.py
index 37b3e7b0a8..c58c09ee09 100644
--- a/deepmd/pt/model/task/property.py
+++ b/deepmd/pt/model/task/property.py
@@ -51,10 +51,6 @@ class PropertyFittingNet(InvarFitting):
         Average property per atom for each element.
     intensive : bool, optional
         Whether the fitting property is intensive.
-    bias_method : str, optional
-        The method of applying the bias to each atomic output, user can select 'normal' or 'no_bias'.
-        If 'normal' is used, the computed bias will be added to the atomic output.
-        If 'no_bias' is used, no bias will be added to the atomic output.
     resnet_dt : bool
         Using time-step in the ResNet construction.
     numb_fparam : int
@@ -78,13 +74,12 @@ def __init__(
         self,
         ntypes: int,
         dim_descrpt: int,
-        task_dim: int = 1,
+        task_dim: int,
+        property_name: Union[str, list],
+        property_dim: Union[int, list] = 1,
         neuron: list[int] = [128, 128, 128],
         bias_atom_p: Optional[torch.Tensor] = None,
         intensive: bool = False,
-        bias_method: str = "normal",
-        property_name: Union[str, list] = "property",
-        property_dim: Union[int, list] = 1,
         resnet_dt: bool = True,
         numb_fparam: int = 0,
         numb_aparam: int = 0,
@@ -97,7 +92,6 @@ def __init__(
     ) -> None:
         self.task_dim = task_dim
         self.intensive = intensive
-        self.bias_method = bias_method
         if isinstance(property_name, str):
             property_name = [property_name]
         self.property_name = property_name
@@ -122,9 +116,6 @@ def __init__(
             **kwargs,
         )
 
-    def get_bias_method(self) -> str:
-        return self.bias_method
-
     def output_def(self) -> FittingOutputDef:
         return FittingOutputDef(
             [
diff --git a/deepmd/pt/train/training.py b/deepmd/pt/train/training.py
index 72e84d577a..b7dbc13cf4 100644
--- a/deepmd/pt/train/training.py
+++ b/deepmd/pt/train/training.py
@@ -1240,7 +1240,11 @@ def get_loss(loss_params, start_lr, _ntypes, _model):
         return TensorLoss(**loss_params)
     elif loss_type == "property":
         task_dim = _model.get_task_dim()
+        property_name = _model.get_property_name()
+        property_dim = _model.get_property_dim()
         loss_params["task_dim"] = task_dim
+        loss_params["property_name"] = property_name
+        loss_params["property_dim"] = property_dim
         return PropertyLoss(**loss_params)
     else:
         loss_params["starter_learning_rate"] = start_lr
diff --git a/deepmd/pt/utils/stat.py b/deepmd/pt/utils/stat.py
index 07e6cecbaa..115493cbfb 100644
--- a/deepmd/pt/utils/stat.py
+++ b/deepmd/pt/utils/stat.py
@@ -518,20 +518,18 @@ def compute_output_stats_global(
             assert ii in std_atom_e.keys()
             concat_bias.append(bias_atom_e[ii])
             concat_std.append(std_atom_e[ii])
-        del bias_atom_e, std_atom_e
-        bias_atom_e = {}
-        std_atom_e = {}
-        bias_atom_e["property"] = np.concatenate(concat_bias, axis=-1)
-        std_atom_e["property"] = np.concatenate(concat_std, axis=-1)
-        std_atom_e["property"] = np.tile(
-            std_atom_e["property"], (bias_atom_e["property"].shape[0], 1)
-        )
+        bias_atom_e = {"property": np.concatenate(concat_bias, axis=-1)}
+        std_atom_e = {"property": np.tile(
+            np.concatenate(concat_std, axis=-1),
+            (bias_atom_e["property"].shape[0], 1),
+        )}
         return bias_atom_e, std_atom_e
 
     bias_atom_e, std_atom_e = _post_process_stat(bias_atom_e, std_atom_e)
 
     # unbias_e is only used for print rmse
+
     if model_pred is None:
         unbias_e = {
             kk: merged_natoms[kk] @ bias_atom_e[kk].reshape(ntypes, -1)
diff --git a/deepmd/utils/argcheck.py b/deepmd/utils/argcheck.py
index 5044489a69..ff54595d99 100644
--- a/deepmd/utils/argcheck.py
+++ b/deepmd/utils/argcheck.py
@@ -1578,10 +1578,9 @@ def fitting_property():
     doc_resnet_dt = 'Whether to use a "Timestep" in the skip connection'
     doc_precision = f"The precision of the fitting net parameters, supported options are {list_to_doc(PRECISION_DICT.keys())} Default follows the interface precision."
     doc_seed = "Random seed for parameter initialization of the fitting net"
-    doc_task_dim = "The dimension of outputs of fitting net"
     doc_intensive = "Whether the fitting property is intensive"
-    doc_bias_method = "The method of applying the bias to each atomic output, user can select 'normal' or 'no_bias'. If 'no_bias' is used, no bias will be added to the atomic output."
-    doc_property_name = "TODO"
+    doc_property_name = "The names of fitting properties, which should be consistent with the property names in the dataset."
+    doc_property_dim = "The dimensions of fitting properties, which should be consistent with the property dimensions in the dataset."
     return [
         Argument("numb_fparam", int, optional=True, default=0, doc=doc_numb_fparam),
         Argument("numb_aparam", int, optional=True, default=0, doc=doc_numb_aparam),
@@ -1610,11 +1609,7 @@ def fitting_property():
         Argument("resnet_dt", bool, optional=True, default=True, doc=doc_resnet_dt),
         Argument("precision", str, optional=True, default="default", doc=doc_precision),
         Argument("seed", [int, None], optional=True, doc=doc_seed),
-        Argument("task_dim", int, optional=True, default=1, doc=doc_task_dim),
         Argument("intensive", bool, optional=True, default=False, doc=doc_intensive),
-        Argument(
-            "bias_method", str, optional=True, default="normal", doc=doc_bias_method
-        ),
         Argument(
             "property_name",
             [str, list],
             optional=True,
@@ -1622,6 +1617,13 @@ def fitting_property():
             default="property",
             doc=doc_property_name,
         ),
+        Argument(
+            "property_dim",
+            [int, list],
+            optional=True,
+            default=1,
+            doc=doc_property_dim,
+        ),
     ]
@@ -2489,8 +2491,6 @@ def loss_property():
     doc_loss_func = "The loss function to minimize, such as 'mae','smooth_mae'."
     doc_metric = "The metric for display. This list can include 'smooth_mae', 'mae', 'mse' and 'rmse'."
    doc_beta = "The 'beta' parameter in 'smooth_mae' loss."
-    doc_property_name = "The names of fitting properties, which should be consistent with the property names in the dataset."
-    doc_property_dim = "The dimensions of fitting properties, which should be consistent with the property dimensions in the dataset."
     return [
         Argument(
             "loss_func",
@@ -2512,21 +2512,7 @@ def loss_property():
             optional=True,
             default=1.00,
             doc=doc_beta,
-        ),
-        Argument(
-            "property_name",
-            [str, list],
-            optional=True,
-            default="property",
-            doc=doc_property_name,
-        ),
-        Argument(
-            "property_dim",
-            [int, list],
-            optional=True,
-            default=1,
-            doc=doc_property_dim,
-        ),
+        )
     ]
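
Usage sketch (not part of the patch): after this change, `property_name` and `property_dim` are declared once in the fitting net, `task_dim` is derived as their sum in `_get_standard_model_components`, and `get_loss` forwards both keys to `PropertyLoss`, so the loss section no longer repeats them. The property names `band_gap` and `dipole` and the network sizes below are illustrative assumptions, not values taken from this patch.

# Hypothetical "fitting_net" section after this change: `task_dim` and
# `bias_method` are no longer user-facing arguments.
fitting_net = {
    "type": "property",
    "intensive": False,
    "property_name": ["band_gap", "dipole"],  # must match the dataset keys
    "property_dim": [1, 3],  # task_dim is derived internally as 1 + 3 = 4
    "neuron": [128, 128, 128],
    "resnet_dt": True,
}

# Hypothetical "loss" section: `property_name`/`property_dim` are no longer
# accepted here; get_loss() pulls them from the model via
# get_property_name()/get_property_dim().
loss = {
    "type": "property",
    "loss_func": "smooth_mae",
    "metric": ["mae"],
    "beta": 1.00,
}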