Skip to content

Commit

Permalink
Merge branch 'devel' of https://github.com/deepmodeling/deepmd-kit into dis

Browse files Browse the repository at this point in the history
  • Loading branch information
CaRoLZhangxy committed Apr 15, 2024
2 parents 1afd8fc + 3eb3d51 commit 7f6632a
Show file tree
Hide file tree
Showing 38 changed files with 1,594 additions and 416 deletions.
4 changes: 4 additions & 0 deletions .git_archival.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
node: $Format:%H$
node-date: $Format:%cI$
describe-name: $Format:%(describe:tags=true,match=*[0-9]*)$
ref-names: $Format:%D$
1 change: 1 addition & 0 deletions .gitattributes
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
# do not show up detailed difference on GitHub
source/3rdparty/* linguist-generated=true
source/3rdparty/README.md linguist-generated=false
.git_archival.txt export-subst
2 changes: 2 additions & 0 deletions .github/workflows/package_c.yml
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,8 @@ jobs:
filename: libdeepmd_c_cu11.tar.gz
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Package C library
run: ./source/install/docker_package_c.sh
env:
Expand Down
4 changes: 2 additions & 2 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
# See https://pre-commit.com/hooks.html for more hooks
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.5.0
rev: v4.6.0
hooks:
- id: trailing-whitespace
exclude: "^.+\\.pbtxt$"
Expand Down Expand Up @@ -52,7 +52,7 @@ repos:
- id: blacken-docs
# C++
- repo: https://github.com/pre-commit/mirrors-clang-format
rev: v18.1.2
rev: v18.1.3
hooks:
- id: clang-format
exclude: ^source/3rdparty|source/lib/src/gpu/cudart/.+\.inc
Expand Down
113 changes: 113 additions & 0 deletions deepmd/dpmodel/atomic_model/base_atomic_model.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: LGPL-3.0-or-later
import copy
from typing import (
Dict,
List,
Expand Down Expand Up @@ -30,11 +31,44 @@ def __init__(
type_map: List[str],
atom_exclude_types: List[int] = [],
pair_exclude_types: List[Tuple[int, int]] = [],
rcond: Optional[float] = None,
preset_out_bias: Optional[Dict[str, np.ndarray]] = None,
):
super().__init__()
self.type_map = type_map
self.reinit_atom_exclude(atom_exclude_types)
self.reinit_pair_exclude(pair_exclude_types)
self.rcond = rcond
self.preset_out_bias = preset_out_bias

def init_out_stat(self):
    """Allocate the packed output statistics: zero bias and unit std per type.

    Populates ``bias_keys``, ``n_out`` and ``max_out_size`` from the fitting
    output definition, then creates ``out_bias`` (zeros) and ``out_std``
    (ones) with shape ``[n_out, ntypes, max_out_size]``.
    """
    n_types = self.get_ntypes()
    self.bias_keys: List[str] = list(self.fitting_output_def().keys())
    # Widest per-key output size; every key is packed into this last dim.
    self.max_out_size = max(
        self.atomic_output_def()[key].size for key in self.bias_keys
    )
    self.n_out = len(self.bias_keys)
    stat_shape = [self.n_out, n_types, self.max_out_size]
    self.out_bias = np.zeros(stat_shape)
    self.out_std = np.ones(stat_shape)

def __setitem__(self, key, value):
    """Dict-style setter for the packed statistics tensors.

    Only the keys ``"out_bias"`` and ``"out_std"`` are accepted; any other
    key raises ``KeyError``.
    """
    if key == "out_bias":
        self.out_bias = value
    elif key == "out_std":
        self.out_std = value
    else:
        raise KeyError(key)

def __getitem__(self, key):
    """Dict-style getter mirroring ``__setitem__``.

    Returns ``out_bias`` or ``out_std``; any other key raises ``KeyError``.
    """
    if key == "out_bias":
        return self.out_bias
    if key == "out_std":
        return self.out_std
    raise KeyError(key)

def get_type_map(self) -> List[str]:
"""Get the type map."""
Expand Down Expand Up @@ -132,6 +166,7 @@ def forward_common_atomic(
fparam=fparam,
aparam=aparam,
)
ret_dict = self.apply_out_stat(ret_dict, atype)

# nf x nloc
atom_mask = ext_atom_mask[:, :nloc].astype(np.int32)
Expand All @@ -150,6 +185,84 @@ def forward_common_atomic(

def serialize(self) -> dict:
    """Serialize the base atomic-model state into a plain dict.

    The statistics arrays are grouped under the ``"@variables"`` key so
    ``deserialize`` can restore them via ``__setitem__``.
    """
    variables = {
        "out_bias": self.out_bias,
        "out_std": self.out_std,
    }
    return {
        "type_map": self.type_map,
        "atom_exclude_types": self.atom_exclude_types,
        "pair_exclude_types": self.pair_exclude_types,
        "rcond": self.rcond,
        "preset_out_bias": self.preset_out_bias,
        "@variables": variables,
    }

@classmethod
def deserialize(cls, data: dict) -> "BaseAtomicModel":
    """Reconstruct an instance from a dict produced by ``serialize``.

    The input dict is deep-copied so the caller's copy is left untouched;
    the ``"@variables"`` entries are restored through ``__setitem__``.
    """
    data = copy.deepcopy(data)
    variables = data.pop("@variables")
    obj = cls(**data)
    for name, value in variables.items():
        obj[name] = value
    return obj

def apply_out_stat(
    self,
    ret: Dict[str, np.ndarray],
    atype: np.ndarray,
):
    """Apply the per-type output bias to each atomic output.

    The developer may override the method to define how the bias is applied
    to the atomic output of the model.

    Parameters
    ----------
    ret
        The dict returned by the forward_atomic method.
    atype
        The atom types. nf x nloc.

    Returns
    -------
    ret
        The same dict with the per-type bias added to every biased key.
    """
    # Only the bias part of the statistics is used here; discard the std.
    out_bias, _ = self._fetch_out_stat(self.bias_keys)
    for kk in self.bias_keys:
        # ret[kk]: nf x nloc x odims; out_bias[kk]: ntypes x odims.
        # Indexing by atype selects each atom's per-type bias row.
        ret[kk] = ret[kk] + out_bias[kk][atype]
    return ret

def _varsize(
    self,
    shape: List[int],
) -> int:
    """Return the total number of elements implied by ``shape``.

    The product of all dimensions; an empty shape yields 1 (a scalar),
    matching ``math.prod``'s empty-product convention — identical to the
    previous manual accumulation loop.
    """
    import math

    return math.prod(shape)

def _get_bias_index(
    self,
    kk: str,
) -> int:
    """Return the position of output key ``kk`` in ``self.bias_keys``.

    Asserts that the key occurs exactly once.
    """
    matches = [i for i, key in enumerate(self.bias_keys) if key == kk]
    assert len(matches) == 1
    return matches[0]

def _fetch_out_stat(
    self,
    keys: List[str],
) -> Tuple[Dict[str, np.ndarray], Dict[str, np.ndarray]]:
    """Unpack the flat statistics arrays into per-key dicts.

    For every requested key, slice its slot out of ``out_bias``/``out_std``
    (trimming the padded last dimension to the key's true size) and reshape
    to ``[ntypes, *output_shape]``.
    """
    ret_bias: Dict[str, np.ndarray] = {}
    ret_std: Dict[str, np.ndarray] = {}
    ntypes = self.get_ntypes()
    for kk in keys:
        idx = self._get_bias_index(kk)
        out_shape = list(self.atomic_output_def()[kk].shape)
        isize = self._varsize(out_shape)
        target_shape = [ntypes, *out_shape]
        ret_bias[kk] = self.out_bias[idx, :, :isize].reshape(target_shape)
        ret_std[kk] = self.out_std[idx, :, :isize].reshape(target_shape)
    return ret_bias, ret_std
33 changes: 7 additions & 26 deletions deepmd/dpmodel/atomic_model/dp_atomic_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,11 +49,12 @@ def __init__(
type_map: List[str],
**kwargs,
):
super().__init__(type_map, **kwargs)
self.type_map = type_map
self.descriptor = descriptor
self.fitting = fitting
self.type_map = type_map
super().__init__(type_map, **kwargs)
super().init_out_stat()

def fitting_output_def(self) -> FittingOutputDef:
"""Get the output def of the fitting net."""
Expand All @@ -79,27 +80,6 @@ def mixed_types(self) -> bool:
"""
return self.descriptor.mixed_types()

def set_out_bias(self, out_bias: np.ndarray, add=False) -> None:
    """Replace or accumulate the output bias of the atomic model.

    Parameters
    ----------
    out_bias : np.ndarray
        The new bias to be applied.
    add : bool, optional
        If True, add the new bias to the existing ``bias_atom_e``;
        if False (default), overwrite it with the new bias.
    """
    if add:
        self.fitting["bias_atom_e"] = out_bias + self.fitting["bias_atom_e"]
    else:
        self.fitting["bias_atom_e"] = out_bias

def get_out_bias(self) -> np.ndarray:
    """Return the output bias stored in the fitting net."""
    bias = self.fitting["bias_atom_e"]
    return bias

def forward_atomic(
self,
extended_coord: np.ndarray,
Expand Down Expand Up @@ -157,7 +137,7 @@ def serialize(self) -> dict:
{
"@class": "Model",
"type": "standard",
"@version": 1,
"@version": 2,
"type_map": self.type_map,
"descriptor": self.descriptor.serialize(),
"fitting": self.fitting.serialize(),
Expand All @@ -168,13 +148,14 @@ def serialize(self) -> dict:
@classmethod
def deserialize(cls, data) -> "DPAtomicModel":
data = copy.deepcopy(data)
check_version_compatibility(data.pop("@version", 1), 1, 1)
check_version_compatibility(data.pop("@version", 1), 2, 2)
data.pop("@class")
data.pop("type")
descriptor_obj = BaseDescriptor.deserialize(data.pop("descriptor"))
fitting_obj = BaseFitting.deserialize(data.pop("fitting"))
type_map = data.pop("type_map")
obj = cls(descriptor_obj, fitting_obj, type_map=type_map, **data)
data["descriptor"] = descriptor_obj
data["fitting"] = fitting_obj
obj = super().deserialize(data)
return obj

def get_dim_fparam(self) -> int:
Expand Down
Loading

0 comments on commit 7f6632a

Please sign in to comment.