diff --git a/.github/workflows/build-wheels.yaml b/.github/workflows/build-wheels.yaml
index 8c29131c..9eddae3a 100644
--- a/.github/workflows/build-wheels.yaml
+++ b/.github/workflows/build-wheels.yaml
@@ -12,7 +12,7 @@ jobs:
# Exclude windows-2019
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
- name: Build wheels
@@ -30,7 +30,7 @@ jobs:
name: Build source distribution
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
- name: Build sdist
run: pipx run build --sdist
diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml
index 16147791..4ccabcf7 100644
--- a/.github/workflows/lint.yaml
+++ b/.github/workflows/lint.yaml
@@ -9,8 +9,8 @@ jobs:
pre-commit:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
- - uses: actions/setup-python@v3
+ - uses: actions/checkout@v4
+ - uses: actions/setup-python@v4
with:
- python-version: 3.8
- - uses: pre-commit/action@v3.0.0
+ python-version: 3.9
+ - uses: pre-commit/action@v3.0.1
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index e679fa93..a5a8643d 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -7,12 +7,11 @@ repos:
args: [--unsafe]
- id: end-of-file-fixer
- id: trailing-whitespace
-- repo: https://github.com/codespell-project/codespell
- rev: v2.2.1
+- repo: https://github.com/astral-sh/ruff-pre-commit
+ # Ruff version.
+ rev: v0.1.6
hooks:
- - id: codespell
- args: [--write-changes, "-L ure,nd,ue,parms,Ue,statics,indexs"]
-- repo: https://github.com/psf/black
- rev: 22.6.0
- hooks:
- - id: black
+ # Run the linter.
+ - id: ruff
+ # Run the formatter.
+ - id: ruff-format
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1a545a0b..cddef6f4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,23 @@ All notable changes to BEAT will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
+## [2.0.0] 24 March 2024
+Major new release! Previous project setups are not backwards compatible. Supports Python 3.9+.
+
+Contributors: Hannes Vasyura-Bathke @hvasbath
+
+### Added
+- new inference mode: "bem" for Boundary Element Modeling
+- allow for multi-source type inference: e.g. MTSource, RectangularSource
+- added parameter defaults module and config functionality
+
+### Changed
+- use pymc v5 and pytensor instead of pymc v3 and theano, which makes old setups incompatible
+- n_sources in the config is now a list of integers (previously a single integer); see the sketch below
+- source_type was replaced by source_types, a list of source types
+- adopted ruff linting
+- replaced nose with pytest for testing
+
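+A minimal sketch of the affected problem_config entries (assuming a YAML project
+configuration; only the renamed fields are shown):
+
+```yaml
+problem_config:
+  source_types: [RectangularSource]
+  n_sources: [1]
+```
+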
## [1.2.5] 24 Mai 2023
Contributors: Hannes Vasyura-Bathke @hvasbath
diff --git a/README.md b/README.md
index 314cf659..b8a3744c 100644
--- a/README.md
+++ b/README.md
@@ -3,10 +3,38 @@
# Bayesian Earthquake Analysis Tool
-If you came looking for the beat package calculating internet time you can find it [here](https://github.com/tonyskapunk/beat).
+Documentation of the current version is hosted on the pyrocko server and can be found here:
+https://pyrocko.org/beat/
+
+Based on pyrocko, pytensor and pymc
+
+## Tutorials
+Step-by-step instructions on how to use the tool for several scenarios can be found here:
+[Examples](https://pyrocko.org/beat/docs/current/examples/index.html#)
+
+## Citation
+If your work results in a publication where you used BEAT, we kindly ask you to consider citing the BEAT software package and the related article:
+
+ > Vasyura-Bathke, Hannes; Dettmer, Jan; Steinberg, Andreas; Heimann, Sebastian; Isken, Marius; Zielke, Olaf; Mai, Paul Martin; Sudhaus, Henriette; Jónsson, Sigurjón: The Bayesian Earthquake Analysis Tool. Seismological Research Letters. https://doi.org/10.1785/0220190075
+
+ > Vasyura-Bathke, Hannes; Dettmer, Jan; Steinberg, Andreas; Heimann, Sebastian; Isken, Marius; Zielke, Olaf; Mai, Paul Martin; Sudhaus, Henriette; Jónsson, Sigurjón (2019): BEAT - Bayesian Earthquake Analysis Tool. V. 1.0. GFZ Data Services. http://doi.org/10.5880/fidgeo.2019.024
+
+
+## Support
+For substantial issues please check and use the "issues" tab of this repository.
+For common issues please check out the BEAT [FAQ](https://pyrocko.org/beat/docs/current/faq.html).
+For smaller issues or short clarifications there is a support [chat](https://hive.pyrocko.org/pyrocko-support/channels/beat). This is provided by the pyrocko project and is accessible after a short account creation.
+
+Finally, there is the option to write an email to:
+
+Hannes Vasyura-Bathke
+hvasbath@uni-potsdam.de
+
+Andreas Steinberg
+andreas.steinberg@ifg.uni-kiel.de
-Based on pyrocko, theano and pymc3
+## Changelog
14 February 2023
Version 1.2.4 is released. Details in the [changelog](https://github.com/hvasbath/beat/blob/master/CHANGELOG.md).
@@ -34,8 +62,6 @@ New [tutorial](https://pyrocko.org/beat/docs/current/examples/FFI_static_resolut
A new version 1.1.0 is released adding support for multi-segmented fault setups and tensile dislocations.
Checkout the [changelog](https://github.com/hvasbath/beat/blob/master/CHANGELOG.md) for all the details.
-Documentation of the current version moved to the pyrocko server can be found here:
-https://pyrocko.org/beat/
New [tutorial](https://pyrocko.org/beat/docs/current/examples/Rectangular_tensile.html) on tensile dislocation modeling.
@@ -49,8 +75,6 @@ current academic system, I had to decide for a delayed release of the documentat
be available provided by someone. Thus, in case you are willing to contribute I would be more than happy to guide/ support
you in writing parts of the documentation for a particular feature-if you want to try it out.
-The legacy documentation of beat v1.0. can be found under: https://hvasbath.github.io/beat/
-
## License
GNU General Public License, Version 3, 29 June 2007
@@ -61,18 +85,6 @@ BEAT is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY
You should have received a copy of the GNU General Public License along with this program. If not, see .
-## Citation
-If your work results in an publication where you used BEAT we kindly ask you to consider citing the BEAT software package and the related article.:
-
- > Vasyura-Bathke, Hannes; Dettmer, Jan; Steinberg, Andreas; Heimann, Sebastian; Isken, Marius; Zielke, Olaf; Mai, Paul Martin; Sudhaus, Henriette; Jónsson, Sigurjón: The Bayesian Earthquake Analysis Tool. Seismological Research Letters. https://doi.org/10.1785/0220190075
-
- > Vasyura-Bathke, Hannes; Dettmer, Jan; Steinberg, Andreas; Heimann, Sebastian; Isken, Marius; Zielke, Olaf; Mai, Paul Martin; Sudhaus, Henriette; Jónsson, Sigurjón (2019): BEAT - Bayesian Earthquake Analysis Tool. V. 1.0. GFZ Data Services. http://doi.org/10.5880/fidgeo.2019.024
-
-
-## Tutorials
-Step by step points on how to use the tool for several scenarios can be found here:
-[Examples](https://hvasbath.github.io/beat/examples/index.html)
-
## Data import
### Geodetic
We recommend to prepare the SAR data (subsampling, data covariance estimation) using KITE (www.pyrocko.org).
@@ -107,18 +119,7 @@ containing a list of 2 lists:
We invite the users to propose data formats or outputs of specific programs that they would
like to see implemented.
-## Support
-For substantial issues please use and check the "issues" tab here in the repository.
-For common issues please check out the BEAT [FAQ](https://hvasbath.github.io/beat/faq.html).
-For smaller issues or short clarifications there is a support [chat](https://hive.pyrocko.org/pyrocko-support/channels/beat). This is provided by the pyrocko project and is accessible after a short account creation.
-
-Finally, there is the option to write an email to:
-
-Hannes Vasyura-Bathke
-hvasbath@uni-potsdam.de
-
-Andreas Steinberg
-andreas.steinberg@ifg.uni-kiel.de
+P.S.: If you came looking for the beat package for calculating internet time, you can find it [here](https://github.com/tonyskapunk/beat).
## Contributions
This is an open source project and contributions to the repository are welcome!
diff --git a/beat/apps/beat.py b/beat/apps/beat.py
index cbfbb13a..cba5995d 100644
--- a/beat/apps/beat.py
+++ b/beat/apps/beat.py
@@ -5,37 +5,42 @@
from os.path import join as pjoin
# disable internal blas parallelisation as we parallelise over chains
-nthreads = "1"
+nthreads = os.environ.get("NUM_THREADS", "1")
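+# Usage sketch (assumption): more BLAS threads can be requested by exporting the
+# variable before running, e.g. `NUM_THREADS=4 beat sample MyProject`.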
os.environ["OMP_NUM_THREADS"] = nthreads
os.environ["NUMEXPR_NUM_THREADS"] = nthreads
os.environ["OPENBLAS_NUM_THREADS"] = nthreads
os.environ["MKL_NUM_THREADS"] = nthreads
os.environ["VECLIB_MAXIMUM_THREADS"] = nthreads
-import copy
-import logging
-import shutil
-import sys
-from collections import OrderedDict
-from optparse import OptionParser
-
-from numpy import array, atleast_2d, floor, zeros, cumsum
-from pyrocko import model, util
-from pyrocko.gf import LocalEngine
-from pyrocko.guts import Dict, dump, load
-from pyrocko.trace import snuffle
-from tqdm import tqdm
-
-from beat import config as bconfig
-from beat import heart, inputf, plotting, utility
-from beat.backend import backend_catalog, extract_bounds_from_summary, thin_buffer
-from beat.config import dist_vars, ffi_mode_str, geometry_mode_str
-from beat.info import version
-from beat.models import Stage, estimate_hypers, load_model, sample
-from beat.sampler.pt import SamplingHistory
-from beat.sampler.smc import sample_factor_final_stage
-from beat.sources import MTQTSource, MTSourceWithMagnitude
-from beat.utility import list2string, string2slice
+if True: # noqa: E402
+ import copy
+ import logging
+ import shutil
+ import sys
+ from collections import OrderedDict
+ from optparse import OptionParser
+
+ from numpy import array, floor, zeros
+ from pyrocko import model, util
+ from pyrocko.gf import LocalEngine
+ from pyrocko.guts import dump, load
+ from pyrocko.trace import snuffle
+ from tqdm import tqdm
+
+ from beat import config as bconfig
+ from beat import heart, inputf, plotting, utility
+ from beat.backend import (
+ backend_catalog,
+ extract_bounds_from_summary,
+ multitrace_to_inference_data,
+ thin_buffer,
+ )
+ from beat.config import bem_mode_str, dist_vars, ffi_mode_str, geometry_mode_str
+ from beat.info import version
+ from beat.models import Stage, estimate_hypers, load_model, sample
+ from beat.sampler.smc import sample_factor_final_stage
+ from beat.sources import MTSourceWithMagnitude
+ from beat.utility import list2string, string2slice
logger = logging.getLogger("beat")
@@ -44,7 +49,7 @@
def d2u(d):
- return dict((k.replace("-", "_"), v) for (k, v) in d.items())
+ return {k.replace("-", "_"): v for (k, v) in d.items()}
subcommand_descriptions = {
@@ -123,7 +128,7 @@ def d2u(d):
"export": 1,
}
-mode_choices = [geometry_mode_str, ffi_mode_str]
+mode_choices = [geometry_mode_str, ffi_mode_str, bem_mode_str]
supported_geodetic_formats = ["matlab", "ascii", "kite"]
supported_geodetic_types = ["SAR", "GNSS"]
@@ -145,21 +150,12 @@ def add_common_options(parser):
def get_project_directory(args, options, nargs=1, popflag=False):
-
largs = len(args)
- if largs == nargs - 1:
- project_dir = os.getcwd()
- elif largs == nargs:
- if popflag:
- name = args.pop(0)
- else:
- name = args[0]
- project_dir = pjoin(os.path.abspath(options.main_path), name)
- else:
- project_dir = os.getcwd()
-
- return project_dir
+ if largs == nargs - 1 or largs != nargs:
+ return os.getcwd()
+ name = args.pop(0) if popflag else args[0]
+ return pjoin(os.path.abspath(options.main_path), name)
def process_common_options(options, project_dir):
@@ -177,14 +173,14 @@ def cl_parse(command, args, setup=None, details=None):
if isinstance(usage, str):
usage = [usage]
- susage = "%s %s" % (program_name, usage[0])
+ susage = f"{program_name} {usage[0]}"
for s in usage[1:]:
susage += "\n%s%s %s" % (" " * 7, program_name, s)
description = descr[0].upper() + descr[1:] + "."
if details:
- description = description + " %s" % details
+ description = f"{description} {details}"
parser = OptionParser(usage=susage, description=description)
@@ -207,20 +203,23 @@ def list_callback(option, opt, value, parser):
setattr(parser.values, option.dest, out)
+def list_callback_int(option, opt, value, parser):
+ out = [int(ival.lstrip()) for ival in value.split(",")]
+ setattr(parser.values, option.dest, out)
+
+
def get_sampled_slip_variables(config):
slip_varnames = config.problem_config.get_slip_variables()
- rvs, fixed_rvs = config.problem_config.get_random_variables()
+ rvs, _ = config.problem_config.get_random_variables()
varnames = list(set(slip_varnames).intersection(set(list(rvs.keys()))))
return varnames
def command_init(args):
-
command_str = "init"
def setup(parser):
-
parser.add_option(
"--min_mag",
dest="min_mag",
@@ -257,21 +256,29 @@ def setup(parser):
)
parser.add_option(
- "--source_type",
- dest="source_type",
- choices=bconfig.source_names,
- default="RectangularSource",
- help="Source type to solve for; %s"
- '. Default: "RectangularSource"'
- % ('", "'.join(name for name in bconfig.source_names)),
+ "--source_types",
+ dest="source_types",
+ type="string",
+ action="callback",
+ callback=list_callback,
+ default=["RectangularSource"],
+ help="List of source types to solve for. Can be any combination of the "
+ "following for mode: geometry - %s; bem - %s; "
+ "Default: 'RectangularSource'"
+ % (
+ list2string(bconfig.source_catalog.keys()),
+ list2string(bconfig.bem_source_catalog.keys()),
+ ),
)
parser.add_option(
"--n_sources",
dest="n_sources",
- type="int",
- default=1,
- help="Integer Number of sources to invert for. Default: 1",
+ type="string",
+ default=[1],
+ action="callback",
+ callback=list_callback_int,
+ help="List of integer numbers of sources per source type to invert for. Default: [1]",
)
parser.add_option(
@@ -319,7 +326,7 @@ def setup(parser):
" for each station!",
)
- parser, options, args = cl_parse("init", args, setup=setup)
+ parser, options, args = cl_parse(command_str, args, setup=setup)
la = len(args)
@@ -346,7 +353,7 @@ def setup(parser):
min_magnitude=options.min_mag,
datatypes=options.datatypes,
mode=options.mode,
- source_type=options.source_type,
+ source_types=options.source_types,
n_sources=options.n_sources,
waveforms=options.waveforms,
sampler=options.sampler,
@@ -357,7 +364,6 @@ def setup(parser):
def command_import(args):
-
from pyrocko import io
command_str = "import"
@@ -365,7 +371,6 @@ def command_import(args):
data_formats = io.allowed_formats("load")[2::]
def setup(parser):
-
parser.add_option(
"--main_path",
dest="main_path",
@@ -455,7 +460,6 @@ def setup(parser):
stations = model.load_stations(pjoin(sc.datadir, "stations.txt"))
if options.seismic_format == "autokiwi":
-
data_traces = inputf.load_data_traces(
datadir=sc.datadir, stations=stations, divider="-"
)
@@ -491,7 +495,6 @@ def setup(parser):
geodetic_outpath = pjoin(c.project_dir, bconfig.geodetic_data_name)
if not os.path.exists(geodetic_outpath) or options.force:
-
gtargets = []
for typ, config in gc.types.items():
logger.info(
@@ -578,7 +581,6 @@ def setup(parser):
else:
# load kite model
- from kite import sandbox_scene
kite_model = load(filename=options.results)
n_sources = len(kite_model.sources)
@@ -586,7 +588,7 @@ def setup(parser):
reference_sources = bconfig.init_reference_sources(
kite_model.sources,
n_sources,
- c.problem_config.source_type,
+ c.problem_config.source_types[0],
c.problem_config.stf_type,
event=c.event,
)
@@ -610,7 +612,6 @@ def setup(parser):
logger.info("Geodetic datatype listed-importing ...")
gc = problem.composites["geodetic"]
if c.geodetic_config.corrections_config.has_enabled_corrections:
-
logger.info("Importing correction parameters ...")
new_bounds = OrderedDict()
@@ -644,7 +645,11 @@ def setup(parser):
param = wmap.time_shifts_id
new_bounds[param] = extract_bounds_from_summary(
- summarydf, varname=param, shape=(wmap.hypersize,), roundto=0
+ summarydf,
+ varname=param,
+ shape=(wmap.hypersize,),
+ roundto=0,
+ alpha=0.06,
)
new_bounds[param].append(point[param])
@@ -659,6 +664,11 @@ def setup(parser):
if options.mode == ffi_mode_str:
n_sources = problem.config.problem_config.n_sources
+ if len(n_sources) != 1:
+ raise TypeError(
+ "FFI with more than one source type is not implemented!"
+ )
+
if options.import_from_mode == geometry_mode_str:
logger.info("Importing non-linear source geometry results!")
@@ -667,12 +677,12 @@ def setup(parser):
point.pop(param)
point = utility.adjust_point_units(point)
- source_points = utility.split_point(point)
+ source_points = utility.split_point(point, n_sources_total=n_sources[0])
reference_sources = bconfig.init_reference_sources(
source_points,
- n_sources,
- c.problem_config.source_type,
+ n_sources[0],
+ c.problem_config.source_types[0],
c.problem_config.stf_type,
event=c.event,
)
@@ -683,11 +693,14 @@ def setup(parser):
c.seismic_config.gf_config.reference_sources = reference_sources
if "seismic" in problem.config.problem_config.datatypes:
-
new_bounds = {}
for param in ["time"]:
new_bounds[param] = extract_bounds_from_summary(
- summarydf, varname=param, shape=(n_sources,), roundto=0
+ summarydf,
+ varname=param,
+ shape=(n_sources[0],),
+ roundto=0,
+ alpha=0.06,
)
new_bounds[param].append(point[param])
@@ -705,7 +718,11 @@ def setup(parser):
shape = (n_sources,)
new_bounds[param] = extract_bounds_from_summary(
- summarydf, varname=param, shape=shape, roundto=1
+ summarydf,
+ varname=param,
+ shape=shape,
+ roundto=1,
+ alpha=0.06,
)
new_bounds[param].append(point[param])
@@ -713,6 +730,7 @@ def setup(parser):
elif options.mode == geometry_mode_str:
if options.import_from_mode == geometry_mode_str:
+ # TODO update for n_sources refactoring
n_sources = problem.config.problem_config.n_sources
logger.info("Importing non-linear source geometry results!")
@@ -727,7 +745,11 @@ def setup(parser):
for param in common_source_params:
try:
new_bounds[param] = extract_bounds_from_summary(
- summarydf, varname=param, shape=(n_sources,), roundto=0
+ summarydf,
+ varname=param,
+ shape=(n_sources,),
+ roundto=0,
+ alpha=0.06,
)
new_bounds[param].append(point[param])
except KeyError:
@@ -750,11 +772,9 @@ def setup(parser):
def command_update(args):
-
command_str = "update"
def setup(parser):
-
parser.add_option(
"--main_path",
dest="main_path",
@@ -805,13 +825,11 @@ def setup(parser):
def command_clone(args):
-
command_str = "clone"
from beat.config import _datatype_choices as datatype_choices
def setup(parser):
-
parser.add_option(
"--main_path",
dest="main_path",
@@ -832,13 +850,29 @@ def setup(parser):
)
parser.add_option(
- "--source_type",
- dest="source_type",
- choices=bconfig.source_names,
- default=None,
- help="Source type to replace in config; %s"
- '. Default: "dont change"'
- % ('", "'.join(name for name in bconfig.source_names)),
+ "--source_types",
+ dest="source_types",
+ type="string",
+ action="callback",
+ callback=list_callback,
+ default=[],
+ help="Source types to replace sources with. Can be any combination of the "
+ "following for mode: geometry - %s; bem - %s; "
+ "Default: No replacement!"
+ % (
+ list2string(bconfig.source_catalog.keys()),
+ list2string(bconfig.bem_source_catalog.keys()),
+ ),
+ )
+
+ parser.add_option(
+ "--n_sources",
+ dest="n_sources",
+ type="string",
+ default=[1],
+ action="callback",
+ callback=list_callback_int,
+ help="List of integer numbers of sources per source type to invert for. Default: [1]",
)
parser.add_option(
@@ -870,7 +904,7 @@ def setup(parser):
parser, options, args = cl_parse(command_str, args, setup=setup)
- if not len(args) == 2:
+ if len(args) != 2:
parser.print_help()
sys.exit(1)
@@ -950,24 +984,26 @@ def setup(parser):
shutil.copytree(linear_gf_dir_name, cloned_linear_gf_dir_name)
logger.info("Successfully cloned linear GF libraries.")
- if options.source_type is None:
+ if len(options.source_types) == 0 and sum(options.n_sources) == 1:
old_priors = copy.deepcopy(c.problem_config.priors)
- new_priors = c.problem_config.select_variables()
- for prior in new_priors:
+ new_prior_names = (
+ c.problem_config.get_variables_mapping().unique_variables_sizes().keys()
+ )
+ for prior in new_prior_names:
if prior in list(old_priors.keys()):
c.problem_config.priors[prior] = old_priors[prior]
else:
- logger.info('Replacing source with "%s"' % options.source_type)
- c.problem_config.source_type = options.source_type
- c.problem_config.init_vars()
+ logger.info('Replacing sources with "%s"' % options.source_types)
+ c.problem_config.n_sources = options.n_sources
+ c.problem_config.source_types = options.source_types
c.problem_config.set_decimation_factor()
- re_init = False
+ re_init = True
if re_init:
logger.info(
- "Re-initialized priors because of new datatype!"
+ "Re-initialized priors because of new source/datatype!"
" Please check prior bounds!"
)
c.problem_config.init_vars()
@@ -990,7 +1026,6 @@ def setup(parser):
def command_sample(args):
-
command_str = "sample"
def setup(parser):
@@ -1040,16 +1075,13 @@ def result_check(mtrace, min_length):
def command_summarize(args):
-
+ from arviz import summary
from numpy import hstack, ravel, split, vstack
- from pymc3 import summary
from pyrocko.gf import RectangularSource
- from pyrocko.moment_tensor import MomentTensor
command_str = "summarize"
def setup(parser):
-
parser.add_option(
"--main_path",
dest="main_path",
@@ -1087,9 +1119,9 @@ def setup(parser):
"--stage_number",
dest="stage_number",
type="int",
- default=None,
+ default=-1,
help='Int of the stage number "n" of the stage to be summarized.'
- " Default: all stages up to last complete stage",
+ " Default: -1, i.e. posterior probability density",
)
parser, options, args = cl_parse(command_str, args, setup=setup)
@@ -1122,7 +1154,6 @@ def setup(parser):
rm_flag = False
for stage_number in stage_numbers:
-
stage_path = stage.handler.stage_path(stage_number)
logger.info("Summarizing stage under: %s" % stage_path)
@@ -1236,8 +1267,10 @@ def setup(parser):
)
store = engine.get_store(target.store_id)
+ logger.debug("n_chains %i", len(chains))
for chain in tqdm(chains):
for idx in idxs:
+ logger.debug("chain %i idx %i", chain, idx)
point = stage.mtrace.point(idx=idx, chain=chain)
reference.update(point)
# normalize MT source, TODO put into get_derived_params
@@ -1255,23 +1288,34 @@ def setup(parser):
# BEAT sources calculate derived params
if options.calc_derived:
composite.point2sources(point)
- if hasattr(source, "get_derived_parameters"):
+ if options.mode == geometry_mode_str:
for source in sources:
- deri = source.get_derived_parameters(
- point=reference, # need to pass correction params
- store=store,
- target=target,
- event=problem.config.event,
- )
- derived.append(deri)
-
- # pyrocko Rectangular source, TODO use BEAT RS ...
- elif isinstance(source, RectangularSource):
- for source in sources:
- source.magnitude = None
- derived.append(
- source.get_magnitude(store=store, target=target)
- )
+ if hasattr(source, "get_derived_parameters"):
+ deri = source.get_derived_parameters(
+ point=reference, # need to pass correction params
+ store=store,
+ target=target,
+ event=problem.config.event,
+ )
+ derived.append(deri)
+
+ # pyrocko Rectangular source, TODO use BEAT RS ...
+ elif isinstance(source, RectangularSource):
+ source.magnitude = None
+ derived.append(
+ source.get_magnitude(store=store, target=target)
+ )
+
+ if len(pc.source_types) > 1:
+ derived = [hstack(derived)]
+
+ elif options.mode == bem_mode_str:
+ response = composite.engine.process(
+ sources=composite.sources, targets=composite.targets
+ )
+ derived = response.get_source_magnitudes(
+ composite.engine.config.shear_modulus
+ )
lpoint = problem.model.lijection.d2l(point)
@@ -1311,7 +1355,10 @@ def setup(parser):
if not os.path.exists(summary_file) or options.force:
logger.info("Writing summary to %s" % summary_file)
- df = summary(rtrace, alpha=0.01)
+
+ idata = multitrace_to_inference_data(rtrace)
+ df = summary(idata, round_to=4, skipna=True)
+ logger.info(df.__str__())
with open(summary_file, "w") as outfile:
df.to_string(outfile)
else:
@@ -1319,11 +1366,9 @@ def setup(parser):
def command_build_gfs(args):
-
command_str = "build_gfs"
def setup(parser):
-
parser.add_option(
"--main_path",
dest="main_path",
@@ -1527,7 +1572,7 @@ def setup(parser):
logger.info("Fault discretization done! Updating problem_config...")
logger.info("%s" % fault.__str__())
- c.problem_config.n_sources = fault.nsubfaults
+ c.problem_config.n_sources = [fault.nsubfaults]
mode_c.npatches = fault.npatches
mode_c.subfault_npatches = fault.subfault_npatches
@@ -1586,6 +1631,7 @@ def setup(parser):
targets = heart.init_geodetic_targets(
datasets,
+ event=c.event,
earth_model_name=gf.earth_model_name,
interpolation=c.geodetic_config.interpolation,
crust_inds=[crust_ind],
@@ -1593,7 +1639,6 @@ def setup(parser):
)
if not fault.is_discretized and fault.needs_optimization:
-
ffidir = os.path.join(c.project_dir, options.mode)
if options.plot:
@@ -1645,7 +1690,6 @@ def setup(parser):
)
elif datatype == "seismic":
-
sc = c.seismic_config
gf = sc.gf_config
pc = c.problem_config
@@ -1697,11 +1741,9 @@ def setup(parser):
def command_plot(args):
-
command_str = "plot"
def setup(parser):
-
parser.add_option(
"--main_path",
dest="main_path",
@@ -1734,9 +1776,9 @@ def setup(parser):
"--stage_number",
dest="stage_number",
type="int",
- default=None,
+ default=-1,
help='Int of the stage number "n" of the stage to be plotted.'
- " Default: all stages up to last complete stage",
+ " Default: -1, i.e. the posterior probability density",
)
parser.add_option(
@@ -1753,7 +1795,7 @@ def setup(parser):
"--nensemble",
dest="nensemble",
type="int",
- default=1,
+ default=0,
help="Int of the number of solutions that" " are used for fuzzy plots",
)
@@ -1826,9 +1868,7 @@ def setup(parser):
plots_avail = plotting.available_plots()
details = """Available are: %s or "all". Multiple plots can be
-selected giving a comma separated list.""" % list2string(
- plots_avail
- )
+selected giving a comma separated list.""" % list2string(plots_avail)
parser, options, args = cl_parse(command_str, args, setup, details)
@@ -1900,9 +1940,9 @@ def setup(parser):
)
else:
try:
- po.reference = problem.model.test_point
+ po.reference = problem.model.initial_point()
step = problem.init_sampler()
- po.reference["like"] = step.step(problem.model.test_point)[1][-1]
+ po.reference["like"] = step.step(po.reference)[1][-1]
except AttributeError:
po.reference = problem.config.problem_config.get_test_point()
else:
@@ -1919,7 +1959,6 @@ def setup(parser):
# except Exception as err:
# pass
except (TypeError, plotting.ModeError, NotImplementedError) as err:
-
logger.warning(
"Could not plot %s got Error: %s \n %s"
% (plot, err, traceback.format_exc())
@@ -1927,7 +1966,6 @@ def setup(parser):
def command_check(args):
-
command_str = "check"
what_choices = ["stores", "traces", "library", "geometry", "discretization"]
@@ -2027,7 +2065,6 @@ def setup(parser):
for datatype in options.datatypes:
for var in get_sampled_slip_variables(problem.config):
-
outdir = pjoin(
problem.config.project_dir,
options.mode,
@@ -2111,7 +2148,7 @@ def setup(parser):
try:
from kite import SandboxScene
from kite import sources as ksources
- from kite.scene import Scene, UserIOWarning
+ from kite.scene import UserIOWarning
from kite.talpa import Talpa
talpa_source_catalog = {
@@ -2189,7 +2226,7 @@ def setup(parser):
lat=gc.event.lat, lon=gc.event.lon, north_shift=n, east_shift=e
)
- src_class_name = problem.config.problem_config.source_type
+ src_class_name = problem.config.problem_config.source_types[0]
for source in sources:
# source.regularize()
try:
@@ -2212,11 +2249,9 @@ def setup(parser):
def command_export(args):
-
command_str = "export"
def setup(parser):
-
parser.add_option(
"--main_path",
dest="main_path",
@@ -2242,7 +2277,7 @@ def setup(parser):
type="int",
default=-1,
help='Int of the stage number "n" of the stage to be summarized.'
- " Default: all stages up to last complete stage",
+ " Default: -1, i.e. the posterior probability density",
)
parser.add_option(
@@ -2284,7 +2319,7 @@ def setup(parser):
problem = load_model(project_dir, options.mode, hypers=False, build=False)
problem.init_hierarchicals()
- sc = problem.config.sampler_config
+ # sc = problem.config.sampler_config
trace_name = "chain--1.{}".format(problem.config.sampler_config.backend)
results_path = pjoin(problem.outfolder, bconfig.results_dir_name)
@@ -2396,7 +2431,6 @@ def setup(parser):
def main():
-
if len(sys.argv) < 2:
sys.exit("Usage: %s" % usage)
@@ -2413,6 +2447,8 @@ def main():
if acommand in subcommands:
globals()["command_" + acommand](["--help"])
+ if "mpi4py" in sys.modules:
+ print("MPI has been imported")
sys.exit("Usage: %s" % usage)
else:
diff --git a/beat/apps/beatdown.py b/beat/apps/beatdown.py
index 809b6384..ee669987 100644
--- a/beat/apps/beatdown.py
+++ b/beat/apps/beatdown.py
@@ -9,7 +9,7 @@
try:
from urllib.error import HTTPError
-except:
+except ImportError:
from urllib2 import HTTPError
import glob
@@ -82,7 +82,7 @@ def get_events(time_range, region=None, catalog=geofon, **kwargs):
return catalog.get_events(time_range, **kwargs)
events = []
- for (west, east, south, north) in automap.split_region(region):
+ for west, east, south, north in automap.split_region(region):
events.extend(
catalog.get_events(
time_range=time_range,
@@ -90,7 +90,7 @@ def get_events(time_range, region=None, catalog=geofon, **kwargs):
lonmax=east,
latmin=south,
latmax=north,
- **kwargs
+ **kwargs,
)
)
@@ -171,7 +171,6 @@ def __call__(self, time, distance, depth):
for ray in self.model.arrivals(
phases=self.phases, zstart=depth, distances=[distance * cake.m2d]
):
-
return time + ray.t + self.omin, time + ray.t + self.omax
raise NoArrival
@@ -452,7 +451,6 @@ def main():
options.local_responses_resp,
options.local_responses_stationxml,
):
-
if resp_opt:
n_resp_opt += 1
@@ -813,7 +811,7 @@ def main():
dist = orthodrome.distance_accurate50m_numpy(
lat_, lon_, channel.latitude.value, channel.longitude.value
)
- except:
+ except AttributeError:
dist = orthodrome.distance_accurate50m_numpy(
lat_, lon_, channel.latitude, channel.longitude
)
@@ -835,7 +833,7 @@ def main():
if channel.sample_rate:
try:
deltat = 1.0 / int(channel.sample_rate.value)
- except:
+ except AttributeError:
deltat = 1.0 / int(channel.sample_rate)
else:
deltat = 1.0
@@ -854,7 +852,7 @@ def main():
)
)
if options.dry_run:
- for (net, sta, loc, cha, tmin, tmax) in selection:
+ for net, sta, loc, cha, tmin, tmax in selection:
available_through[net, sta, loc, cha].add(site)
else:
@@ -873,7 +871,7 @@ def main():
data = fdsn.dataselect(
site=site,
selection=selection_now,
- **get_user_credentials(site)
+ **get_user_credentials(site),
)
while True:
@@ -956,7 +954,6 @@ def plural_s(x):
for sites in sorted(
all_channels.keys(), key=lambda sites: (-len(sites), sites)
):
-
nchannels = len(all_channels[sites])
nstations = len(all_stations[sites])
nchannels_all += nchannels
@@ -1022,7 +1019,6 @@ def plural_s(x):
for traces in plocal.chopper_grouped(
gather=lambda tr: tr.nslc_id, tmin=tmin, tmax=tmax, tinc=tinc
):
-
for tr in traces:
if tr.nslc_id not in have_data:
fns.extend(io.save(traces, fn_template_raw))
@@ -1086,7 +1082,6 @@ def plural_s(x):
for traces_a in p.chopper_grouped(
gather=lambda tr: tr.nslc_id, tmin=otmin, tmax=otmax, tinc=otinc, tpad=otpad
):
-
rest_traces_a = []
win_a = None
for tr in traces_a:
@@ -1204,8 +1199,7 @@ def plural_s(x):
else:
assert False
- for (proj, in_channels, out_channels) in pios:
-
+ for proj, in_channels, out_channels in pios:
proc = trace.project(traces, proj, in_channels, out_channels)
for tr in proc:
tr_beat = heart.SeismicDataset.from_pyrocko_trace(tr)
diff --git a/beat/backend.py b/beat/backend.py
index 74cee0d6..95e0d23f 100644
--- a/beat/backend.py
+++ b/beat/backend.py
@@ -1,8 +1,8 @@
"""
-Text file trace backend modified from pymc3 to work efficiently with
+File trace backends modified from pymc to work efficiently with
SMC
-Store sampling values as CSV files.
+Store sampling values as CSV or binary files.
File format
-----------
@@ -37,11 +37,17 @@
except ImportError:
from pandas.errors import ParserError as CParserError
-from pymc3.backends import base, ndarray
-from pymc3.backends import tracetab as ttab
-from pymc3.blocking import ArrayOrdering, DictToArrayBijection
-from pymc3.model import modelcontext
-from pymc3.step_methods.arraystep import BlockedStep
+from typing import (
+ Set,
+)
+
+from arviz import convert_to_inference_data
+
+# from arviz.data.base import dict_to_dataset
+from pymc.backends import base, ndarray
+from pymc.blocking import DictToArrayBijection, RaveledVars
+from pymc.model import modelcontext
+from pymc.step_methods.arraystep import BlockedStep
from pyrocko import util
from beat.config import sample_p_outname, transd_vars_dist
@@ -50,13 +56,48 @@
ListArrayOrdering,
ListToArrayBijection,
dump_objects,
- list2string,
load_objects,
)
logger = logging.getLogger("backend")
+def _create_flat_names(varname, shape):
+ """Return flat variable names for `varname` of `shape`.
+
+ Examples
+ --------
+ >>> _create_flat_names('x', (5,))
+ ['x__0', 'x__1', 'x__2', 'x__3', 'x__4']
+
+ >>> _create_flat_names('x', (2, 2))
+ ['x__0_0', 'x__0_1', 'x__1_0', 'x__1_1']
+ """
+ if not shape:
+ return [varname]
+ labels = (num.ravel(xs).tolist() for xs in num.indices(shape))
+ labels = (map(str, xs) for xs in labels)
+ return ["{}__{}".format(varname, "_".join(idxs)) for idxs in zip(*labels)]
+
+
+def _create_flat_names_summary(varname, shape):
+ if not shape or sum(shape) == 1:
+ return [varname]
+
+ labels = (num.ravel(xs).tolist() for xs in num.indices(shape))
+ labels = (map(str, [xs]) for xs in labels)
+ return ["{}{}".format(varname, "".join(idxs)) for idxs in zip(*labels)]
+
+
+def _create_shape(flat_names):
+ """Determine shape from `_create_flat_names` output."""
+ try:
+ _, shape_str = flat_names[-1].rsplit("__", 1)
+ except ValueError:
+ return ()
+ return tuple(int(i) + 1 for i in shape_str.split("_"))
+
+
def thin_buffer(buffer, buffer_thinning, ensure_last=True):
"""
Reduce a list of objects by a given value.
@@ -86,30 +127,25 @@ class ArrayStepSharedLLK(BlockedStep):
Parameters
----------
-
- vars : list
+ value_vars : list
variables to be sampled
out_vars : list
variables to be stored in the traces
shared : dict
- theano variable -> shared variables
- blocked : boolean
- (default True)
+ pytensor variable -> shared variables
"""
- def __init__(self, vars, out_vars, shared, blocked=True):
- self.vars = vars
- self.ordering = ArrayOrdering(vars)
+ def __init__(self, value_vars, out_vars, shared):
+ self.value_vars = value_vars
self.lordering = ListArrayOrdering(out_vars, intype="tensor")
lpoint = [var.tag.test_value for var in out_vars]
self.shared = {var.name: shared for var, shared in shared.items()}
- self.blocked = blocked
- self.bij = DictToArrayBijection(self.ordering, self.population[0])
+ self.blocked = True
blacklist = list(
- set(self.lordering.variables) - set([var.name for var in vars])
+ set(self.lordering.variables) - set([var.name for var in value_vars])
)
-
+ self.bij = DictToArrayBijection()
self.lij = ListToArrayBijection(self.lordering, lpoint, blacklist=blacklist)
def __getstate__(self):
@@ -119,12 +155,23 @@ def __setstate__(self, state):
self.__dict__.update(state)
def step(self, point):
- for var, share in self.shared.items():
- share.container.storage[0] = point[var]
+ for name, shared_var in self.shared.items():
+ shared_var.set_value(point[name])
+
+ # print("point", point)
+
+ # assure order and content of RVs consistent to value_vars
+ point = {val_var.name: point[val_var.name] for val_var in self.value_vars}
- apoint, alist = self.astep(self.bij.map(point))
+ q = self.bij.map(point)
+ # print("before", q.data)
+ apoint, alist = self.astep(q.data)
+ # print("after", apoint, alist)
+ if not isinstance(apoint, RaveledVars):
+ # We assume that the mapping has stayed the same
+ apoint = RaveledVars(apoint, q.point_map_info)
- return self.bij.rmap(apoint), alist
+ return self.bij.rmap(apoint, start_point=point), alist
class BaseChain(object):
@@ -133,18 +180,16 @@ class BaseChain(object):
Parameters
----------
-
model : Model
If None, the model is taken from the `with` context.
- vars : list of variables
+ value_vars : list of variables
Sampling values will be stored for these variables. If None,
`model.unobserved_RVs` is used.
"""
- def __init__(self, model=None, vars=None, buffer_size=5000, buffer_thinning=1):
-
- self.model = None
- self.vars = None
+ def __init__(
+ self, model=None, value_vars=None, buffer_size=5000, buffer_thinning=1
+ ):
self.var_shapes = None
self.chain = None
@@ -155,21 +200,18 @@ def __init__(self, model=None, vars=None, buffer_size=5000, buffer_thinning=1):
self.cov_counter = 0
if model is not None:
- self.model = modelcontext(model)
+ model = modelcontext(model)
- if vars is None and self.model is not None:
- vars = self.model.unobserved_RVs
+ if value_vars is None and model is not None:
+ value_vars = model.unobserved_RVs
- if vars is not None:
- self.vars = vars
-
- if self.vars is not None:
+ if value_vars is not None:
# Get variable shapes. Most backends will need this
# information.
self.var_shapes = OrderedDict()
self.var_dtypes = OrderedDict()
self.varnames = []
- for var in self.vars:
+ for var in value_vars:
self.var_shapes[var.name] = var.tag.test_value.shape
self.var_dtypes[var.name] = var.tag.test_value.dtype
self.varnames.append(var.name)
@@ -238,16 +280,15 @@ def __init__(
self,
dir_path="",
model=None,
- vars=None,
+ value_vars=None,
buffer_size=5000,
buffer_thinning=1,
progressbar=False,
k=None,
):
-
super(FileChain, self).__init__(
model=model,
- vars=vars,
+ value_vars=value_vars,
buffer_size=buffer_size,
buffer_thinning=buffer_thinning,
)
@@ -264,10 +305,10 @@ def __init__(
if var in transd_vars_dist:
shape = (k,)
- self.flat_names[var] = ttab.create_flat_names(var, shape)
+ self.flat_names[var] = _create_flat_names(var, shape)
else:
for var, shape in self.var_shapes.items():
- self.flat_names[var] = ttab.create_flat_names(var, shape)
+ self.flat_names[var] = _create_flat_names(var, shape)
self.k = k
@@ -278,6 +319,7 @@ def __init__(
self.draws = 0
self._df = None
self.filename = None
+ self.derived_mapping = None
def __len__(self):
if self.filename is None:
@@ -298,8 +340,17 @@ def add_derived_variables(self, varnames, shapes):
"Inconsistent number of variables %i and shapes %i!" % (nvars, nshapes)
)
+ self.derived_mapping = {}
for varname, shape in zip(varnames, shapes):
- self.flat_names[varname] = ttab.create_flat_names(varname, shape)
+ if varname in self.varnames:
+ exist_idx = self.varnames.index(varname)
+ self.varnames.pop(exist_idx)
+ exist_shape = self.var_shapes[varname]
+ shape = tuple(map(sum, zip(exist_shape, shape)))
+ concat_idx = len(self.varnames)
+ self.derived_mapping[exist_idx] = concat_idx
+
+ self.flat_names[varname] = _create_flat_names(varname, shape)
self.var_shapes[varname] = shape
self.var_dtypes[varname] = "float64"
self.varnames.append(varname)
@@ -314,7 +365,6 @@ def data_file(self):
return self._df
def record_buffer(self):
-
if self.chain is None:
raise ValueError("Chain has not been setup. Saving samples not possible!")
@@ -344,6 +394,11 @@ def write(self, lpoint, draw):
If buffer is full write samples to file.
"""
self.count += 1
+ if self.derived_mapping:
+ for exist_idx, concat_idx in self.derived_mapping.items():
+ value = lpoint.pop(exist_idx)
+ lpoint[concat_idx] = num.hstack((value, lpoint[concat_idx]))
+
self.buffer.append((lpoint, draw))
if self.count == self.buffer_size:
self.record_buffer()
@@ -354,6 +409,20 @@ def clear_data(self):
"""
self._df = None
+ def _get_sampler_stats(
+ self, stat_name: str, sampler_idx: int, burn: int, thin: int
+ ) -> num.ndarray:
+ """Get sampler statistics."""
+ raise NotImplementedError()
+
+ @property
+ def stat_names(self) -> Set[str]:
+ names: Set[str] = set()
+ for vars in self.sampler_vars or []:
+ names.update(vars.keys())
+
+ return names
+
class MemoryChain(BaseChain):
"""
@@ -368,7 +437,6 @@ class MemoryChain(BaseChain):
"""
def __init__(self, buffer_size=5000):
-
super(MemoryChain, self).__init__(buffer_size=buffer_size)
def setup(self, draws, chain, overwrite=False):
@@ -417,17 +485,16 @@ def __init__(
self,
dir_path,
model=None,
- vars=None,
+ value_vars=None,
buffer_size=5000,
buffer_thinning=1,
progressbar=False,
k=None,
):
-
super(TextChain, self).__init__(
dir_path,
model,
- vars,
+ value_vars,
buffer_size=buffer_size,
progressbar=progressbar,
k=k,
@@ -477,6 +544,7 @@ def lpoint2file(filehandle, lpoint):
columns = itertools.chain.from_iterable(
map(str, value.ravel()) for value in lpoint
)
+ # print("backend write", columns)
filehandle.write(",".join(columns) + "\n")
# Write binary
@@ -545,7 +613,7 @@ def get_values(self, varname, burn=0, thin=1):
shape = (self._df.shape[0],) + self.var_shapes[varname]
vals = var_df.values.ravel().reshape(shape)
return vals[burn::thin]
- except (KeyError):
+ except KeyError:
raise ValueError(
'Did not find varname "%s" in sampling ' "results! Fixed?" % varname
)
@@ -616,17 +684,16 @@ def __init__(
self,
dir_path,
model=None,
- vars=None,
+ value_vars=None,
buffer_size=5000,
progressbar=False,
k=None,
buffer_thinning=1,
):
-
super(NumpyChain, self).__init__(
dir_path,
model,
- vars,
+ value_vars,
progressbar=progressbar,
buffer_size=buffer_size,
buffer_thinning=buffer_thinning,
@@ -639,7 +706,7 @@ def __repr__(self):
return "NumpyChain({},{},{},{},{},{})".format(
self.dir_path,
self.model,
- self.vars,
+ self.value_vars,
self.buffer_size,
self.progressbar,
self.k,
@@ -772,7 +839,6 @@ def lpoint2file(filehandle, varnames, data, lpoint):
print("Error on write file: ", e)
def _load_df(self):
-
if not self.__data_structure:
try:
self.__data_structure = self.construct_data_structure()
@@ -800,7 +866,7 @@ def get_values(self, varname, burn=0, thin=1):
shape = (self._df.shape[0],) + self.var_shapes[varname]
vals = data.ravel().reshape(shape)
return vals[burn::thin]
- except (ValueError):
+ except ValueError:
raise ValueError(
'Did not find varname "%s" in sampling ' "results! Fixed?" % varname
)
@@ -838,15 +904,14 @@ class TransDTextChain(object):
"""
def __init__(
- self, name, model=None, vars=None, buffer_size=5000, progressbar=False
+ self, name, model=None, value_vars=None, buffer_size=5000, progressbar=False
):
-
self._straces = {}
self.buffer_size = buffer_size
self.progressbar = progressbar
- if vars is None:
- vars = model.unobserved_RVs
+ if value_vars is None:
+ value_vars = model.unobserved_RVs
transd, dims_idx = istransd(model)
if transd:
@@ -868,7 +933,7 @@ def __init__(
# init indexing chain
self._index = TextChain(
dir_path=name,
- vars=[],
+ value_vars=[],
buffer_size=self.buffer_size,
progressbar=self.progressbar,
)
@@ -968,16 +1033,16 @@ def get_stage_indexes(self, load_stage=None):
else:
stage_number = self.highest_sampled_stage()
- if os.path.exists(self.atmip_path(-1)):
+ if os.path.exists(self.smc_path(-1)):
list_indexes = [i for i in range(-1, stage_number + 1)]
else:
list_indexes = [i for i in range(stage_number)]
return list_indexes
- def atmip_path(self, stage_number):
+ def smc_path(self, stage_number):
"""
- Consistent naming for atmip params.
+ Consistent naming for smc params.
"""
return os.path.join(self.stage_path(stage_number), sample_p_outname)
@@ -991,7 +1056,7 @@ def load_sampler_params(self, stage_number):
of stage number or -1 for last stage
"""
if stage_number == -1:
- if not os.path.exists(self.atmip_path(stage_number)):
+ if not os.path.exists(self.smc_path(stage_number)):
prev = self.highest_sampled_stage()
else:
prev = stage_number
@@ -1001,15 +1066,15 @@ def load_sampler_params(self, stage_number):
prev = stage_number - 1
logger.info("Loading parameters from completed stage {}".format(prev))
- sampler_state, updates = load_objects(self.atmip_path(prev))
+ sampler_state, updates = load_objects(self.smc_path(prev))
sampler_state["stage"] = stage_number
return sampler_state, updates
- def dump_atmip_params(self, stage_number, outlist):
+ def dump_smc_params(self, stage_number, outlist):
"""
- Save atmip params to file.
+ Save smc params to file.
"""
- dump_objects(self.atmip_path(stage_number), outlist)
+ dump_objects(self.smc_path(stage_number), outlist)
def clean_directory(self, stage, chains, rm_flag):
"""
@@ -1041,7 +1106,7 @@ def load_multitrace(self, stage, chains=None, varnames=None):
Returns
-------
- A :class:`pymc3.backend.base.MultiTrace` instance
+ A :class:`pymc.backend.base.MultiTrace` instance
"""
dirname = self.stage_path(stage)
return load_multitrace(
@@ -1051,7 +1116,6 @@ def load_multitrace(self, stage, chains=None, varnames=None):
def recover_existing_results(
self, stage, draws, step, buffer_thinning=1, varnames=None, update=None
):
-
if stage > 0:
prev = stage - 1
if update is not None:
@@ -1116,7 +1180,7 @@ def load_multitrace(dirname, varnames=[], chains=None, backend="csv"):
Returns
-------
- A :class:`pymc3.backend.base.MultiTrace` instance
+ A :class:`pymc.backend.base.MultiTrace` instance
"""
if not istransd(varnames)[0]:
@@ -1164,7 +1228,7 @@ def check_multitrace(mtrace, draws, n_chains, buffer_thinning=1):
Parameters
----------
- mtrace : :class:`pymc3.backend.base.MultiTrace`
+ mtrace : :class:`pymc.backend.base.MultiTrace`
Multitrace object containing the sampling traces
draws : int
Number of steps (i.e. chain length for each Markov Chain)
@@ -1225,7 +1289,7 @@ def get_highest_sampled_stage(homedir, return_final=False):
def load_sampler_params(project_dir, stage_number, mode):
"""
- Load saved parameters from given ATMIP stage.
+ Load saved parameters from given smc stage.
Parameters
----------
@@ -1268,7 +1332,7 @@ def concatenate_traces(mtraces):
def extract_variables_from_df(dataframe):
"""
- Extract random variables and their shapes from the pymc3-pandas data-frame
+ Extract random variables and their shapes from the pymc-pandas data-frame
Parameters
----------
@@ -1293,7 +1357,7 @@ def extract_variables_from_df(dataframe):
indexes.append(index)
flat_names[varname] = indexes
- var_shapes[varname] = ttab._create_shape(indexes)
+ var_shapes[varname] = _create_shape(indexes)
return flat_names, var_shapes
@@ -1310,9 +1374,9 @@ def extract_bounds_from_summary(summary, varname, shape, roundto=None, alpha=0.0
def do_nothing(value):
return value
- indexes = ttab.create_flat_names(varname, shape)
- lower_quant = "hpd_{0:g}".format(100 * alpha / 2)
- upper_quant = "hpd_{0:g}".format(100 * (1 - alpha / 2))
+ indexes = _create_flat_names_summary(varname, shape)
+ lower_quant = "hdi_{0:g}%".format(100 * alpha / 2)
+ upper_quant = "hdi_{0:g}%".format(100 * (1 - alpha / 2))
bounds = []
for quant in [lower_quant, upper_quant]:
@@ -1332,3 +1396,20 @@ def do_nothing(value):
bounds.append(values)
return bounds
+
+
+def multitrace_to_inference_data(mtrace):
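+    """
+    Convert a pymc-style multitrace into an :class:`arviz.InferenceData` object.
+
+    All draws are stacked into a single chain so that the result can be passed
+    to ``arviz.summary``; variables containing NaN values are reported with a
+    warning.
+    """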
+ idata_posterior_dict = {}
+ for varname in mtrace.varnames:
+ vals = num.atleast_2d(mtrace.get_values(varname).T)
+ if num.isnan(vals).any():
+ logger.warning("Variable '%s' contains NaN values." % varname)
+
+ size, draws = vals.shape
+ if size > 1:
+ vals = num.atleast_3d(vals).T
+
+ idata_posterior_dict[varname] = vals
+
+ idata = convert_to_inference_data(idata_posterior_dict)
+ return idata
diff --git a/beat/bem/__init__.py b/beat/bem/__init__.py
new file mode 100644
index 00000000..133b6dfd
--- /dev/null
+++ b/beat/bem/__init__.py
@@ -0,0 +1,2 @@
+from .base import * # noqa
+from .sources import * # noqa
diff --git a/beat/bem/base.py b/beat/bem/base.py
new file mode 100644
index 00000000..e8179f7c
--- /dev/null
+++ b/beat/bem/base.py
@@ -0,0 +1,311 @@
+from __future__ import annotations
+
+import logging
+
+import numpy as num
+from matplotlib import pyplot as plt
+from pyrocko.gf import StaticResult
+from pyrocko.guts import List, Object
+from pyrocko.guts_array import Array
+from pyrocko.moment_tensor import symmat6, moment_to_magnitude
+
+from .sources import DiscretizedBEMSource, check_intersection, slip_comp_to_idx
+
+try:
+ from cutde import halfspace as HS
+ from cutde.geometry import strain_to_stress
+
+except ImportError:
+ raise ImportError("'Cutde' needs to be installed!")
+
+
+logger = logging.getLogger("bem")
+km = 1.0e3
+
+
+class BEMResponse(Object):
+ sources = List.T(default=[])
+ targets = List.T(default=[])
+ discretized_sources = List.T()
+ displacements = Array.T(
+ shape=(None,), dtype=num.float32, serialize_as="base64", optional=True
+ )
+ target_ordering = Array.T(shape=(None,), dtype=num.int64, optional=True)
+ source_ordering = Array.T(shape=(None,), dtype=num.int64, optional=True)
+ inverted_slip_vectors = Array.T(shape=(None, 3), dtype=num.float32, optional=True)
+
+ @property
+ def n_sources(self):
+ return len(self.sources)
+
+ @property
+ def n_targets(self):
+ return len(self.targets)
+
+ @property
+ def is_valid(self):
+ if self.discretized_sources is None:
+ return False
+ else:
+ return True
+
+ def static_results(self) -> list[StaticResult]:
+ """
+ Get target specific surface displacements in NED coordinates.
+ """
+ results = []
+ for target_idx in range(self.n_targets):
+ start_idx = self.target_ordering[target_idx]
+ end_idx = self.target_ordering[target_idx + 1]
+
+ result = {
+ "displacement.n": self.displacements[start_idx:end_idx, 1],
+ "displacement.e": self.displacements[start_idx:end_idx, 0],
+ "displacement.d": -self.displacements[start_idx:end_idx, 2],
+ }
+ results.append(StaticResult(result=result))
+
+ return results
+
+ def source_slips(self) -> list[num.ndarray]:
+ """
+ Get inverted slip vectors for sources
+
+ Returns
+ -------
+ array_like: [n_triangles, 3]
+ where columns are: strike, dip and normal slip-components"""
+ slips = []
+ for src_idx in range(self.n_sources):
+ if self.source_ordering is not None:
+ start_idx = self.source_ordering[src_idx]
+ end_idx = self.source_ordering[src_idx + 1]
+ slips.append(self.inverted_slip_vectors[start_idx:end_idx, :])
+ else:
+ slips.append(None)
+ return slips
+
+ def get_source_magnitudes(self, shear_modulus):
+ inverted_slips = self.source_slips()
+ total_slips = [num.linalg.norm(slips, axis=1) for slips in inverted_slips]
+
+ magnitudes = []
+ for source, slips in zip(self.discretized_sources, total_slips):
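+            # seismic moment per triangle: M0 = shear_modulus * area * slip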
+            moments = source.get_areas_triangles() * slips * shear_modulus
+ magnitudes.append(moment_to_magnitude(moments.sum()))
+
+ return magnitudes
+
+
+class BEMEngine(object):
+ def __init__(self, config) -> None:
+ self.config = config
+ self._obs_points = None
+ self._ncoords_targets = None
+
+ def cache_target_coords3(self, targets, dtype="float32"):
+ ncoords_targets = num.cumsum([0] + [target.ncoords for target in targets])
+ if self._ncoords_targets is None:
+ self._ncoords_targets = ncoords_targets
+ coords_diff = 0
+ else:
+ coords_diff = self._ncoords_targets.sum() - ncoords_targets.sum()
+
+ if self._obs_points is None or coords_diff:
+ coords5 = num.vstack([target.coords5 for target in targets])
+ obs_pts = num.zeros((coords5.shape[0], 3))
+ obs_pts[:, 0] = coords5[:, 3]
+ obs_pts[:, 1] = coords5[:, 2]
+ self._obs_points = obs_pts.astype(dtype)
+ self._ncoords_targets = ncoords_targets
+
+ return self._obs_points
+
+ def clear_target_cache(self):
+ self._obs_points = None
+ self._ncoords_targets = None
+
+ def process(self, sources: list, targets: list, debug=False) -> num.ndarray:
+ mesh_size = self.config.mesh_size * km
+
+ if self.config.check_mesh_intersection:
+ intersect = check_intersection(sources, mesh_size=mesh_size)
+ else:
+ intersect = False
+
+ obs_points = self.cache_target_coords3(targets, dtype="float32")
+
+ if intersect:
+ return BEMResponse(
+ sources=sources,
+ targets=targets,
+ discretized_sources=None,
+ displacements=num.full(
+ (obs_points.shape[0], 3), -99.0, dtype="float64"
+ ),
+ target_ordering=self._ncoords_targets,
+ source_ordering=None,
+ inverted_slip_vectors=None,
+ )
+
+ discretized_sources = [
+ source.discretize_basesource(mesh_size=mesh_size, plot=False)
+ for source in sources
+ ]
+
+ coefficient_matrix = self.get_interaction_matrix(
+ discretized_sources, debug=debug
+ )
+ tractions = self.config.boundary_conditions.get_traction_field(
+ discretized_sources
+ )
+
+ if debug:
+ ax = plt.axes()
+ im = ax.matshow(coefficient_matrix)
+ ax.set_title("Interaction matrix")
+ plt.colorbar(im)
+ print("CEF shape", coefficient_matrix.shape)
+
+ # solve with least squares
+ inv_slips = num.linalg.multi_dot(
+ [
+ num.linalg.inv(coefficient_matrix.T.dot(coefficient_matrix)),
+ coefficient_matrix.T,
+ tractions,
+ ]
+ )
+
+ all_triangles = num.vstack(
+ [source.triangles_xyz for source in discretized_sources]
+ )
+ disp_mat = HS.disp_matrix(
+ obs_pts=obs_points, tris=all_triangles, nu=self.config.poissons_ratio
+ )
+
+ n_all_triangles = all_triangles.shape[0]
+ slips = num.zeros((n_all_triangles, 3))
+
+ start_idx = 0
+ sources_ntriangles = num.cumsum(
+ [start_idx] + [source.n_triangles for source in discretized_sources]
+ )
+ for bcond in self.config.boundary_conditions.iter_conditions():
+ for source_idx in bcond.source_idxs:
+ source_mesh = discretized_sources[source_idx]
+ end_idx = start_idx + source_mesh.n_triangles
+
+ slips[
+ sources_ntriangles[source_idx] : sources_ntriangles[source_idx + 1],
+ slip_comp_to_idx[bcond.slip_component],
+ ] = inv_slips[start_idx:end_idx]
+
+ start_idx += source_mesh.n_triangles
+
+ displacements = disp_mat.reshape((-1, n_all_triangles * 3)).dot(slips.ravel())
+ return BEMResponse(
+ sources=sources,
+ targets=targets,
+ discretized_sources=discretized_sources,
+ displacements=displacements.reshape((-1, 3)),
+ target_ordering=self._ncoords_targets,
+ source_ordering=sources_ntriangles,
+ inverted_slip_vectors=slips,
+ )
+
+ def get_interaction_matrix(
+ self, discretized_sources: list, debug: bool
+ ) -> num.ndarray:
+ G_slip_components = [[], [], []]
+ for bcond in self.config.boundary_conditions.iter_conditions():
+ for source_idx in bcond.source_idxs:
+ source_mesh = discretized_sources[source_idx]
+
+ Gs_strike = []
+ Gs_dip = []
+ Gs_normal = []
+ for receiver_idx in bcond.receiver_idxs:
+ receiver_mesh = discretized_sources[receiver_idx]
+ g_strike, g_dip, g_normal = get_coefficient_matrices_tdcs(
+ receiver_mesh,
+ source_mesh.triangles_xyz,
+ bcond.slip_component,
+ nu=self.config.poissons_ratio,
+ mu=self.config.shear_modulus,
+ )
+
+ if debug:
+ figs, axs = plt.subplots(1, 3)
+ for k, (comp, g_comp) in enumerate(
+ zip(
+ ("strike", "dip", "normal"), (g_strike, g_dip, g_normal)
+ )
+ ):
+ axs[k].matshow(g_comp)
+ axs[k].set_title(comp)
+
+ plt.show()
+
+ Gs_strike.append(g_strike)
+ Gs_dip.append(g_dip)
+ Gs_normal.append(g_normal)
+
+ G_slip_components[0].append(num.vstack(Gs_strike))
+ G_slip_components[1].append(num.vstack(Gs_dip))
+ G_slip_components[2].append(num.vstack(Gs_normal))
+
+ return num.block(G_slip_components)
+
+ def get_store(self, store_id):
+ """Dummy method to allow compatibility"""
+ return None
+
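+# Usage sketch (assumption, only illustrating the classes above): with a config
+# object providing mesh_size, boundary_conditions, poissons_ratio and
+# shear_modulus, a forward calculation would look like
+#
+#   engine = BEMEngine(config)
+#   response = engine.process(sources=sources, targets=targets)
+#   results = response.static_results()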
+
+def get_coefficient_matrices_tdcs(
+ discretized_bem_source: DiscretizedBEMSource,
+ triangles_xyz: num.ndarray,
+ slip_component: str,
+ nu: float,
+ mu: float,
+) -> list[num.ndarray]:
+ """
+ Calculates interaction matrix between source triangles and receiver triangles.
+
+    Parameters
+    ----------
+    discretized_bem_source : DiscretizedBEMSource
+        receiver mesh on whose triangle centroids the tractions are evaluated
+    triangles_xyz : :class:`numpy.ndarray`
+        vertex coordinates of the source triangles [n_triangles, 3, 3]
+    slip_component : str
+        slip component of the source triangles, one of "strike", "dip" or "normal"
+    nu : float
+        Poisson's ratio
+    mu : float
+        shear modulus
+
+    Returns
+    -------
+    tuple of :class:`numpy.ndarray`
+        coefficient matrices for the strike, dip and normal traction components
+    """
+
+ strain_mat = HS.strain_matrix(
+ discretized_bem_source.centroids, triangles_xyz, nu=nu
+ )
+
+    # select relevant source slip vector component index (0-strike, 1-dip, 2-normal)
+ slip_idx = slip_comp_to_idx[slip_component]
+ comp_strain_mat = strain_mat[:, :, :, slip_idx]
+ comp_strain_mat_T = num.transpose(comp_strain_mat, (0, 2, 1))
+
+ comp_stress_mat_T = strain_to_stress(
+ comp_strain_mat_T.reshape((-1, 6)), mu, nu
+ ).reshape(comp_strain_mat_T.shape)
+
+ stress_mat_m9s = symmat6(*comp_stress_mat_T.T).T
+
+ # get traction vector from Stress tensor
+ tvs = num.sum(
+ stress_mat_m9s * discretized_bem_source.unit_normal_vectors[:, None, None, :],
+ axis=-1,
+ )
+
+ # get stress components from traction vector
+ g_strike = num.sum(
+ tvs * discretized_bem_source.unit_strike_vectors[:, None, :], axis=-1
+ )
+ g_dip = num.sum(tvs * discretized_bem_source.unit_dip_vectors[:, None, :], axis=-1)
+ g_normal = num.sum(
+ tvs * discretized_bem_source.unit_normal_vectors[:, None, :], axis=-1
+ )
+ return g_strike, g_dip, -g_normal # Minus is needed due to ENU convention
diff --git a/beat/bem/sources.py b/beat/bem/sources.py
new file mode 100644
index 00000000..1b7a31e9
--- /dev/null
+++ b/beat/bem/sources.py
@@ -0,0 +1,1032 @@
+from __future__ import annotations
+
+import logging
+import os
+from dataclasses import dataclass
+from time import time
+
+import numpy as num
+from pyrocko.gf.seismosizer import Source, outline_rect_source
+from pyrocko.guts import Float, Tuple
+from pyrocko.orthodrome import ne_to_latlon
+
+try:
+ import pygmsh
+
+ gmsh = pygmsh.helpers.gmsh
+
+ nthreads = os.environ.get("NUM_THREADS", "1")
+
+except ImportError:
+ raise ImportError("'Pygmsh' needs to be installed!")
+
+try:
+ from cutde.geometry import compute_efcs_to_tdcs_rotations
+
+except ImportError:
+ raise ImportError("'Cutde' needs to be installed!")
+
+
+logger = logging.getLogger("bem.sources")
+
+origintypes = {"east_shift", "north_shift", "depth"}
+
+
+DEG2RAD = num.pi / 180.0
+km = 1.0e3
+
+
+slip_comp_to_idx = {
+ "strike": 0,
+ "dip": 1,
+ "normal": 2,
+}
+
+
+__all__ = [
+ "DiscretizedBEMSource",
+ "RingfaultBEMSource",
+ "DiskBEMSource",
+ "TriangleBEMSource",
+ "RectangularBEMSource",
+ "CurvedBEMSource",
+ "source_catalog",
+ "check_intersection",
+]
+
+
+def get_node_name(prefix: str, suffix: str) -> str:
+ if prefix:
+ return f"{prefix}_{suffix}_node"
+ else:
+ return f"{suffix}_node"
+
+
+class DiscretizedBEMSource(object):
+ def __init__(self, mesh, dtype=None, tractions=(0, 0, 0), mesh_size=1.0):
+ self._points = mesh.points.astype(dtype) if dtype is not None else mesh.points
+ self._mesh = mesh
+ self._centroids = None
+ self._tdcs = None
+ self._e_strike = None
+ self._e_dip = None
+ self._e_normal = None
+ self.mesh_size = mesh_size
+
+ self.tractions = tractions
+
+ def __repr__(self):
+ return self._mesh.__repr__()
+
+ def get_traction_vector(self, slip_component):
+ return (
+ num.ones((self.n_triangles))
+ * self.tractions[slip_comp_to_idx[slip_component]]
+ )
+
+ @property
+ def vertices(self):
+ """Coordinates of vertices in [m] (n_vertices, 3)"""
+ return self._points
+
+ @property
+ def n_vertices(self):
+ return len(self.vertices)
+
+ @property
+ def triangles_idxs(self):
+ return self._mesh.cells_dict["triangle"]
+
+ @property
+ def triangles_xyz(self):
+ """
+ Returns:
+ :class:`numpy.ndarray` (n_triangles, n_points [3], n_dimensions [3])
+ """
+ return self.vertices[self.triangles_idxs]
+
+ @property
+ def p1_xyz(self):
+ """
+ Coordinates xyz [m] of all points p1
+
+ Returns:
+ :class:`numpy.ndarray` [n_triangles, 3]
+ """
+ return self.triangles_xyz[:, 0]
+
+ @property
+ def p2_xyz(self):
+ """
+ Coordinates xyz [m] of all points p2
+
+ Returns:
+ :class:`numpy.ndarray` [n_triangles, 3]
+ """
+ return self.triangles_xyz[:, 1]
+
+ @property
+ def p3_xyz(self):
+ """
+ Coordinates xyz [m] of all points p3
+
+ Returns:
+ :class:`numpy.ndarray` [n_triangles, 3]
+ """
+ return self.triangles_xyz[:, 2]
+
+ @property
+ def vector_p1p2(self):
+ return self.p2_xyz - self.p1_xyz
+
+ @property
+ def vector_p1p3(self):
+ return self.p3_xyz - self.p1_xyz
+
+ def get_areas_triangles(self):
+ """
+ Area of triangles [$m^2$]
+
+ Returns:
+ :class:`numpy.ndarray` [n_triangles]
+ """
+ return (
+ num.linalg.norm(num.cross(self.vector_p1p2, self.vector_p1p3), axis=1) / 2
+ )
+
+ def get_minmax_triangles_xyz(self):
+ mins = self.triangles_xyz.min(0)
+ maxs = self.triangles_xyz.max(0)
+ return num.vstack([mins, maxs]).T
+
+ @property
+ def n_triangles(self):
+ return self.triangles_xyz.shape[0]
+
+ @property
+ def centroids(self):
+ if self._centroids is None:
+ self._centroids = num.mean(self.triangles_xyz, axis=1)
+
+ return self._centroids
+
+ @property
+ def vectors_tdcs(self):
+ """
+ Unit vectors in Triangular Dislocation Coordinate System
+ """
+ if self._tdcs is None:
+ self._tdcs = compute_efcs_to_tdcs_rotations(self.triangles_xyz)
+
+ return self._tdcs
+
+ @property
+ def unit_strike_vectors(self):
+ if self._e_strike is None:
+ strike_vec = self.vectors_tdcs[:, 0, :]
+ strike_vec /= num.linalg.norm(strike_vec, axis=1)[:, None]
+ self._e_strike = strike_vec
+ return self._e_strike
+
+ @property
+ def unit_dip_vectors(self):
+ if self._e_dip is None:
+ dip_vec = self.vectors_tdcs[:, 1, :]
+ dip_vec /= num.linalg.norm(dip_vec, axis=1)[:, None]
+ self._e_dip = dip_vec
+ return self._e_dip
+
+ @property
+ def unit_normal_vectors(self):
+ if self._e_normal is None:
+ normal_vec = self.vectors_tdcs[:, 2, :]
+ normal_vec /= num.linalg.norm(normal_vec, axis=1)[:, None]
+ self._e_normal = normal_vec
+ return self._e_normal
+
+
+@dataclass
+class Node:
+ """Class for storing coordinates of a node in a mesh."""
+
+ x: float
+ y: float
+ z: float
+
+
+class BEMSource(Source):
+ def _init_points_geometry(
+        self, geom=None, prefixes=("",), suffixes=("",), mesh_size=1.0
+ ):
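+        # create a gmsh point for every node property (e.g. "top_left_node") and
+        # keep its handle so that edges and arcs can reference it later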
+ for prefix in prefixes:
+ for suffix in suffixes:
+ node_name = get_node_name(prefix, suffix)
+ node = getattr(self, node_name)
+
+ if geom is not None:
+ self.points[node_name] = geom.add_point(node, mesh_size=mesh_size)
+ else:
+ raise ValueError("Geometry needs to be initialized first!")
+
+ def get_tractions(self):
+ raise NotImplementedError("Implement in inherited class")
+
+ def _get_arch_points(self, node_names: list[str]) -> list:
+ try:
+ return [self.points[node_name] for node_name in node_names]
+ except KeyError:
+ raise ValueError("Points are not fully initialized in geometry!")
+
+ def get_source_surface(self, geom, mesh_size):
+ raise NotImplementedError
+
+ def discretize_basesource(self, mesh_size, target=None, plot=False):
+ with pygmsh.geo.Geometry() as geom:
+ gmsh.option.setNumber("General.NumThreads", int(nthreads))
+
+ surf = self.get_source_surface(geom, mesh_size)
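+            # sources meshed from several surfaces (e.g. the ring-fault mantle)
+            # are tied together into a surface loop before mesh generation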
+ if len(surf) > 1:
+ geom.add_surface_loop(surf)
+
+ mesh = geom.generate_mesh()
+
+ if plot:
+ gmsh.fltk.run()
+
+ return DiscretizedBEMSource(
+ mesh=mesh,
+ mesh_size=mesh_size,
+ dtype="float32",
+ tractions=self.get_tractions(),
+ )
+
+
+class TriangleBEMSource(BEMSource):
+ strike_traction = Float.T(
+ default=0.0, help="Traction [Pa] in strike-direction of the Triangles"
+ )
+ dip_traction = Float.T(
+ default=0.0, help="Traction [Pa] in dip-direction of the Triangles"
+ )
+ normal_traction = Float.T(
+ default=0.0, help="Traction [Pa] in normal-direction of the Triangles"
+ )
+
+ p1 = Tuple.T(3, Float.T(), default=(0, 1, -1))
+ p2 = Tuple.T(3, Float.T(), default=(1, 0, -1))
+ p3 = Tuple.T(3, Float.T(), default=(-1, 0, -1))
+
+ def get_source_surface(self, geom, mesh_size):
+ gp1 = geom.add_point(self.p1, mesh_size=mesh_size)
+ gp2 = geom.add_point(self.p2, mesh_size=mesh_size)
+ gp3 = geom.add_point(self.p3, mesh_size=mesh_size)
+
+ l1 = geom.add_line(gp1, gp2)
+ l2 = geom.add_line(gp2, gp3)
+ l3 = geom.add_line(gp3, gp1)
+
+ edge = geom.add_curve_loop([l1, l2, l3])
+ return [geom.add_surface(edge)]
+
+ def get_tractions(self):
+ return (
+ self.strike_traction, # coordinate transform ENU - NED
+ self.dip_traction,
+ self.normal_traction,
+ )
+
+
+class EllipseBEMSource(BEMSource):
+ a_half_axis = Float.T(default=0.5 * km)
+ b_half_axis = Float.T(default=0.3 * km)
+
+ strike = Float.T(default=0.0)
+
+ normal_traction = Float.T(
+ default=0.0, help="Traction [Pa] in normal-direction of the Triangles"
+ )
+
+ def __init__(self, **kwargs):
+ BEMSource.__init__(self, **kwargs)
+ self.points = {}
+
+ def get_tractions(self):
+ raise NotImplementedError("Needs implementation in inheriting class")
+
+ @property
+ def _origin(self):
+ return Node(x=self.east_shift, y=self.north_shift, z=-self.depth)
+
+ @property
+ def origin_node(self):
+ return (
+ self._origin.x,
+ self._origin.y,
+ self._origin.z,
+ )
+
+ @property
+ def left_a_node(self):
+ return (
+ self._origin.x,
+ self._origin.y - self.a_half_axis,
+ self._origin.z,
+ )
+
+ @property
+ def right_a_node(self):
+ return (
+ self._origin.x,
+ self._origin.y + self.a_half_axis,
+ self._origin.z,
+ )
+
+ @property
+ def upper_b_node(self):
+ return (
+ self._origin.x + self.b_half_axis,
+ self._origin.y,
+ self._origin.z,
+ )
+
+ @property
+ def lower_b_node(self):
+ return (
+ self._origin.x - self.b_half_axis,
+ self._origin.y,
+ self._origin.z,
+ )
+
+ def _get_node_suffixes(self):
+ return (
+ "left_a",
+ "right_a",
+ "upper_b",
+ "lower_b",
+ "origin",
+ )
+
+ def get_top_upper_left_arch_points(self):
+ return self._get_arch_points(
+ [
+ "left_a_node",
+ "origin_node",
+ "upper_b_node",
+ "upper_b_node",
+ ]
+ )
+
+ def get_top_upper_right_arch_points(self):
+ return self._get_arch_points(
+ [
+ "upper_b_node",
+ "origin_node",
+ "right_a_node",
+ "right_a_node",
+ ]
+ )
+
+ def get_top_lower_right_arch_points(self):
+ return self._get_arch_points(
+ [
+ "right_a_node",
+ "origin_node",
+ "lower_b_node",
+ "lower_b_node",
+ ]
+ )
+
+ def get_top_lower_left_arch_points(self):
+ return self._get_arch_points(
+ [
+ "lower_b_node",
+ "origin_node",
+ "left_a_node",
+ "left_a_node",
+ ]
+ )
+
+
+class DiskBEMSource(EllipseBEMSource):
+ plunge = Float.T(default=0.0)
+ dip = Float.T(default=0.0)
+    rake = Float.T(default=0.0, help="Rake-angle [deg] of the traction vector, measured from the strike direction.")
+ traction = Float.T(default=0.0, help="Traction [Pa] in rake direction.")
+
+ def get_tractions(self):
+ strike_traction = -num.cos(self.rake * DEG2RAD) * self.traction
+ dip_traction = -num.sin(self.rake * DEG2RAD) * self.traction
+ return (
+ strike_traction,
+ dip_traction,
+ self.normal_traction,
+ )
+
+ def __init__(self, **kwargs):
+ EllipseBEMSource.__init__(self, **kwargs)
+
+ def outline(self, cs="xy", npoints=50):
+ return num.flipud(
+ get_ellipse_points(
+ self.lon,
+ self.lat,
+ self.east_shift,
+ self.north_shift,
+ self.a_half_axis,
+ self.b_half_axis,
+ self.dip,
+ self.plunge,
+ self.strike,
+ cs=cs,
+ npoints=npoints,
+ )
+ )
+
+ def get_source_surface(self, geom, mesh_size):
+ self._init_points_geometry(
+ geom,
+ prefixes=("",),
+ suffixes=self._get_node_suffixes(),
+ mesh_size=mesh_size,
+ )
+
+ rotations = (-self.plunge, -self.dip, self.strike)
+ axes = ((1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0))
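+        # rotate all ellipse nodes about the origin: plunge about the east axis,
+        # dip about the north axis, then strike about the vertical axis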
+ for point in self.points.values():
+ for rot_angle, axis in zip(rotations, axes):
+ if rot_angle != 0:
+ # TODO if rotation results in one point ending at the exact
+ # same location of other point it will be removed
+ geom.rotate(
+ point,
+ self.origin_node,
+ -rot_angle * DEG2RAD,
+ axis,
+ )
+
+ t_arch_ul = geom.add_ellipse_arc(*self.get_top_upper_left_arch_points())
+ t_arch_ur = geom.add_ellipse_arc(*self.get_top_upper_right_arch_points())
+ t_arch_lr = geom.add_ellipse_arc(*self.get_top_lower_right_arch_points())
+ t_arch_ll = geom.add_ellipse_arc(*self.get_top_lower_left_arch_points())
+
+ ellipse = geom.add_curve_loop([t_arch_ul, t_arch_ur, t_arch_lr, t_arch_ll])
+
+ return [geom.add_surface(ellipse)]
+
+
+class RingfaultBEMSource(EllipseBEMSource):
+ delta_east_shift_bottom = Float.T(default=0.0 * km)
+ delta_north_shift_bottom = Float.T(default=0.0 * km)
+ depth_bottom = Float.T(default=1.0 * km)
+
+ a_half_axis = Float.T(default=0.5 * km)
+ b_half_axis = Float.T(default=0.3 * km)
+ a_half_axis_bottom = Float.T(default=0.55 * km)
+ b_half_axis_bottom = Float.T(default=0.35 * km)
+
+ strike_traction = Float.T(
+ default=0.0, help="Traction [Pa] in strike-direction of the Triangles"
+ )
+ dip_traction = Float.T(
+ default=0.0, help="Traction [Pa] in dip-direction of the Triangles"
+ )
+
+ def __init__(self, **kwargs):
+ EllipseBEMSource.__init__(self, **kwargs)
+
+ def get_tractions(self):
+ return (
+ -self.strike_traction, # coordinate transform ENU - NED
+ self.dip_traction,
+ self.normal_traction,
+ )
+
+ @property
+ def _bottom_origin(self):
+ return Node(
+ x=self._origin.x + self.delta_east_shift_bottom,
+ y=self._origin.y + self.delta_north_shift_bottom,
+ z=-self.depth_bottom,
+ )
+
+ @property
+ def bottom_origin_node(self):
+ return (
+ self._bottom_origin.x,
+ self._bottom_origin.y,
+ self._bottom_origin.z,
+ )
+
+ @property
+ def bottom_left_a_node(self):
+ return (
+ self._bottom_origin.x,
+ self._bottom_origin.y - self.a_half_axis_bottom,
+ self._bottom_origin.z,
+ )
+
+ @property
+ def bottom_right_a_node(self):
+ return (
+ self._bottom_origin.x,
+ self._bottom_origin.y + self.a_half_axis_bottom,
+ self._bottom_origin.z,
+ )
+
+ @property
+ def bottom_upper_b_node(self):
+ return (
+ self._bottom_origin.x + self.b_half_axis_bottom,
+ self._bottom_origin.y,
+ self._bottom_origin.z,
+ )
+
+ @property
+ def bottom_lower_b_node(self):
+ return (
+ self._bottom_origin.x - self.b_half_axis_bottom,
+ self._bottom_origin.y,
+ self._bottom_origin.z,
+ )
+
+ def get_bottom_upper_left_arch_points(self):
+ return self._get_arch_points(
+ [
+ "bottom_left_a_node",
+ "bottom_origin_node",
+ "bottom_upper_b_node",
+ "bottom_upper_b_node",
+ ]
+ )
+
+ def get_bottom_upper_right_arch_points(self):
+ return self._get_arch_points(
+ [
+ "bottom_upper_b_node",
+ "bottom_origin_node",
+ "bottom_right_a_node",
+ "bottom_right_a_node",
+ ]
+ )
+
+ def get_bottom_lower_right_arch_points(self):
+ return self._get_arch_points(
+ [
+ "bottom_right_a_node",
+ "bottom_origin_node",
+ "bottom_lower_b_node",
+ "bottom_lower_b_node",
+ ]
+ )
+
+ def get_bottom_lower_left_arch_points(self):
+ return self._get_arch_points(
+ [
+ "bottom_lower_b_node",
+ "bottom_origin_node",
+ "bottom_left_a_node",
+ "bottom_left_a_node",
+ ]
+ )
+
+ def get_left_a_connecting_points(self):
+ return self._get_arch_points(["left_a_node", "bottom_left_a_node"])
+
+ def get_right_a_connecting_points(self):
+ return self._get_arch_points(["right_a_node", "bottom_right_a_node"])
+
+ def get_upper_b_connecting_points(self):
+ return self._get_arch_points(["upper_b_node", "bottom_upper_b_node"])
+
+ def get_lower_b_connecting_points(self):
+ return self._get_arch_points(["lower_b_node", "bottom_lower_b_node"])
+
+ def get_source_surface(self, geom, mesh_size):
+ self._init_points_geometry(
+ geom,
+ prefixes=("", "bottom"),
+ suffixes=self._get_node_suffixes(),
+ mesh_size=mesh_size,
+ )
+
+ if self.strike != 0:
+ for point in self.points.values():
+ geom.rotate(
+ point,
+ (self.east_shift, self.north_shift, -self.depth),
+ -self.strike * DEG2RAD,
+ (0.0, 0.0, 1.0),
+ )
+
+ t_arch_ul = geom.add_ellipse_arc(*self.get_top_upper_left_arch_points())
+ t_arch_ur = geom.add_ellipse_arc(*self.get_top_upper_right_arch_points())
+ t_arch_lr = geom.add_ellipse_arc(*self.get_top_lower_right_arch_points())
+ t_arch_ll = geom.add_ellipse_arc(*self.get_top_lower_left_arch_points())
+
+ b_arch_ul = geom.add_ellipse_arc(*self.get_bottom_upper_left_arch_points())
+ b_arch_ur = geom.add_ellipse_arc(*self.get_bottom_upper_right_arch_points())
+ b_arch_lr = geom.add_ellipse_arc(*self.get_bottom_lower_right_arch_points())
+ b_arch_ll = geom.add_ellipse_arc(*self.get_bottom_lower_left_arch_points())
+
+ c_lmaj = geom.add_line(*self.get_left_a_connecting_points())
+ c_rmaj = geom.add_line(*self.get_right_a_connecting_points())
+ c_umin = geom.add_line(*self.get_upper_b_connecting_points())
+ c_lmin = geom.add_line(*self.get_lower_b_connecting_points())
+
+ m_top_left = geom.add_curve_loop([t_arch_ul, c_umin, -b_arch_ul, -c_lmaj])
+ m_top_right = geom.add_curve_loop([t_arch_ur, c_rmaj, -b_arch_ur, -c_umin])
+ m_bottom_right = geom.add_curve_loop([t_arch_lr, c_lmin, -b_arch_lr, -c_rmaj])
+ m_bottom_left = geom.add_curve_loop([t_arch_ll, c_lmaj, -b_arch_ll, -c_lmin])
+ mantle = [
+ geom.add_surface(quadrant)
+ for quadrant in (m_top_left, m_top_right, m_bottom_right, m_bottom_left)
+ ]
+ return mantle
+ # return geom.add_surface_loop(mantle)
+
+ def outline(self, cs="xy", npoints=50):
+ upper_ellipse = get_ellipse_points(
+ self.lon,
+ self.lat,
+ self.east_shift,
+ self.north_shift,
+ self.a_half_axis,
+ self.b_half_axis,
+ 0.0,
+ 0.0,
+ self.strike,
+ cs=cs,
+ npoints=npoints,
+ )
+ lower_ellipse = get_ellipse_points(
+ self.lon,
+ self.lat,
+ self.east_shift + self.delta_east_shift_bottom,
+ self.north_shift + self.delta_north_shift_bottom,
+ self.a_half_axis_bottom,
+ self.b_half_axis_bottom,
+ 0.0,
+ 0.0,
+ self.strike,
+ cs=cs,
+ npoints=npoints,
+ )
+ return num.vstack([upper_ellipse, lower_ellipse])
+
+
+class RectangularBEMSource(BEMSource):
+ width = Float.T(default=5 * km, help="Width [m] of the fault plane.")
+ length = Float.T(default=10 * km, help="Length [m] of the fault plane.")
+    dip = Float.T(default=0, help="Dip-angle [deg], measured downward from the horizontal.")
+    strike = Float.T(default=0.0, help="Strike-angle [deg], measured clockwise from North.")
+    rake = Float.T(default=0.0, help="Rake-angle [deg] of the traction vector, measured from the strike direction.")
+ traction = Float.T(default=0.0, help="Traction [Pa] in rake direction.")
+ normal_traction = Float.T(
+ default=0.0, help="Traction [Pa] in normal-direction of the Triangles"
+ )
+
+ def __init__(self, **kwargs):
+ BEMSource.__init__(self, **kwargs)
+ self.points = {}
+
+ def outline(self, cs="xyz"):
+ points = outline_rect_source(
+ self.strike, self.dip, self.length, self.width, anchor="top"
+ )
+
+ points[:, 0] += self.north_shift
+ points[:, 1] += self.east_shift
+ points[:, 2] += self.depth
+ if cs == "xyz":
+ return points
+ elif cs == "xy":
+ return points[:, :2]
+ elif cs in ("latlon", "lonlat"):
+ latlon = ne_to_latlon(self.lat, self.lon, points[:, 0], points[:, 1])
+
+ latlon = num.array(latlon).T
+ if cs == "latlon":
+ return latlon
+ else:
+ return latlon[:, ::-1]
+
+ def get_tractions(self):
+ strike_traction = -num.cos(self.rake * DEG2RAD) * self.traction
+ dip_traction = -num.sin(self.rake * DEG2RAD) * self.traction
+ return (
+ strike_traction,
+ dip_traction,
+ self.normal_traction,
+ )
+
+ @property
+ def _origin(self):
+ return Node(x=self.east_shift, y=self.north_shift, z=-self.depth)
+
+ @property
+ def origin_node(self):
+ return (
+ self._origin.x,
+ self._origin.y,
+ self._origin.z,
+ )
+
+ @property
+ def _top_right(self):
+ return Node(
+ self._origin.x,
+ self._origin.y + self.length / 2,
+ self._origin.z,
+ )
+
+ @property
+ def top_right_node(self):
+ return (
+ self._top_right.x,
+ self._top_right.y,
+ self._top_right.z,
+ )
+
+ @property
+ def _top_left(self):
+ return Node(
+ self._origin.x,
+ self._origin.y - self.length / 2,
+ self._origin.z,
+ )
+
+ @property
+ def top_left_node(self):
+ return (
+ self._top_left.x,
+ self._top_left.y,
+ self._top_left.z,
+ )
+
+ @property
+ def _bottom_left(self):
+ return Node(
+ self._origin.x + self.width,
+ self._origin.y - self.length / 2,
+ self._origin.z,
+ )
+
+ @property
+ def bottom_left_node(self):
+ return (
+ self._bottom_left.x,
+ self._bottom_left.y,
+ self._bottom_left.z,
+ )
+
+ @property
+ def _bottom_right(self):
+ return Node(
+ self._origin.x + self.width,
+ self._origin.y + self.length / 2,
+ self._origin.z,
+ )
+
+ @property
+ def bottom_right_node(self):
+ return (
+ self._bottom_right.x,
+ self._bottom_right.y,
+ self._bottom_right.z,
+ )
+
+ def _get_node_suffixes(self):
+ return ("left", "right")
+
+ def get_top_edge(self):
+ return self._get_arch_points(["top_left_node", "top_right_node"])
+
+ def get_bottom_edge(self):
+ return self._get_arch_points(["bottom_left_node", "bottom_right_node"])
+
+ def get_left_edge(self):
+ return self._get_arch_points(["top_left_node", "bottom_left_node"])
+
+ def get_right_edge(self):
+ return self._get_arch_points(["top_right_node", "bottom_right_node"])
+
+ def get_source_surface(self, geom, mesh_size):
+ self._init_points_geometry(
+ geom,
+ prefixes=("top", "bottom"),
+ suffixes=self._get_node_suffixes(),
+ mesh_size=mesh_size,
+ )
+
+ top = geom.add_bezier(self.get_top_edge())
+ right = geom.add_bezier(self.get_right_edge())
+ bottom = geom.add_bezier(self.get_bottom_edge())
+ left = geom.add_bezier(self.get_left_edge())
+
+ rectangle = geom.add_curve_loop([-top, left, bottom, -right])
+ rectangle_surface = geom.add_surface(rectangle)
+
+ rotations = (-self.dip, self.strike)
+ axes = ((0.0, 1.0, 0.0), (0.0, 0.0, 1.0))
+
+ for rot_angle, axis in zip(rotations, axes):
+ if rot_angle != 0:
+ geom.rotate(
+ rectangle_surface,
+ self.origin_node,
+ -rot_angle * DEG2RAD,
+ axis,
+ )
+
+ return [rectangle_surface]
+
+
+class CurvedBEMSource(RectangularBEMSource):
+    curv_location_bottom = Float.T(default=0.0)
+    curv_amplitude_bottom = Float.T(default=0.0)
+    bend_location = Float.T(default=0.0)
+    bend_amplitude = Float.T(default=0.0)
+
+ @property
+ def bend_left_node(self):
+ return (
+ self._top_left.x + self.width * self.bend_location,
+ self._top_left.y,
+ self._top_left.z + self.width * self.bend_amplitude,
+ )
+
+ @property
+ def bend_right_node(self):
+ return (
+ self._top_right.x + self.width * self.bend_location,
+ self._top_right.y,
+ self._top_right.z + self.width * self.bend_amplitude,
+ )
+
+ @property
+ def curve_left_node(self):
+ """Shallow edge - no curve for now"""
+ return (
+ self._origin.x,
+ self._origin.y,
+ self._origin.z,
+ )
+
+ @property
+ def curve_right_node(self):
+ return (
+ self._bottom_left.x,
+ self._bottom_left.y + self.length * self.curv_location_bottom,
+ self._bottom_left.z + self.length * self.curv_amplitude_bottom,
+ )
+
+ def get_top_edge(self):
+ return self._get_arch_points(
+ ["top_left_node", "curve_left_node", "top_right_node"]
+ )
+
+ def get_bottom_edge(self):
+ return self._get_arch_points(
+ ["bottom_left_node", "curve_right_node", "bottom_right_node"]
+ )
+
+ def get_left_edge(self):
+ return self._get_arch_points(
+ ["top_left_node", "bend_left_node", "bottom_left_node"]
+ )
+
+ def get_right_edge(self):
+ return self._get_arch_points(
+ ["top_right_node", "bend_right_node", "bottom_right_node"]
+ )
+
+ def get_source_surface(self, geom, mesh_size):
+ self._init_points_geometry(
+ geom,
+ prefixes=("top", "bottom", "curve", "bend"),
+ suffixes=self._get_node_suffixes(),
+ mesh_size=mesh_size,
+ )
+
+ top = geom.add_bezier(self.get_top_edge())
+ right = geom.add_bezier(self.get_right_edge())
+ bottom = geom.add_bezier(self.get_bottom_edge())
+ left = geom.add_bezier(self.get_left_edge())
+
+ quadrangle = geom.add_curve_loop([top, right, -bottom, -left])
+ quad_surface = geom.add_surface(quadrangle)
+
+ rotations = (-self.dip, self.strike)
+ axes = ((0.0, 1.0, 0.0), (0.0, 0.0, 1.0))
+
+ for rot_angle, axis in zip(rotations, axes):
+ if rot_angle != 0:
+ geom.rotate(
+ quad_surface,
+ self.origin_node,
+ -rot_angle * DEG2RAD,
+ axis,
+ )
+ return [quad_surface]
+
+
+def get_ellipse_points(
+ lon: float,
+ lat: float,
+ east_shift: float,
+ north_shift: float,
+ a_half_axis: float,
+ b_half_axis: float,
+ dip: float,
+ plunge: float,
+ strike: float,
+ cs: str = "xy",
+ npoints: int = 50,
+) -> num.ndarray:
+ a_half_axis_rot = a_half_axis * num.cos(dip * DEG2RAD)
+ b_half_axis_rot = b_half_axis * num.cos(plunge * DEG2RAD)
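+    # project the half axes onto the horizontal plane (map view) before
+    # rotating the outline by the strike angle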
+
+ ring = num.linspace(0, 2 * num.pi, npoints)
+ ellipse = num.array(
+ [b_half_axis_rot * num.cos(ring), a_half_axis_rot * num.sin(ring)]
+ )
+
+ strike_rad = -strike * DEG2RAD
+ rot_strike = num.array(
+ [
+ [num.cos(strike_rad), -num.sin(strike_rad)],
+ [num.sin(strike_rad), num.cos(strike_rad)],
+ ]
+ )
+ ellipse_rot = rot_strike.dot(ellipse)
+
+ points = num.atleast_2d(num.zeros([npoints, 2]))
+ points[:, 0] += ellipse_rot[1, :] + north_shift
+ points[:, 1] += ellipse_rot[0, :] + east_shift
+
+ if cs == "xy":
+ return points
+ elif cs in ("latlon", "lonlat"):
+ latlon = ne_to_latlon(lat, lon, points[:, 0], points[:, 1])
+
+ latlon = num.array(latlon).T
+ if cs == "latlon":
+ return latlon
+ else:
+ return latlon[:, ::-1]
+ else:
+ raise NotImplementedError(f"Coordinate system '{cs}' is not implemented.")
+
+
+def check_intersection(sources: list, mesh_size: float = 0.5) -> bool:
+ """
+ Computationally expensive check for source intersection.
+ """
+ n_sources = len(sources)
+ if n_sources > 1:
+ with pygmsh.occ.Geometry() as geom:
+ gmsh.option.setNumber("General.NumThreads", int(nthreads))
+ gmsh.option.setNumber("General.Verbosity", 1) # silence warnings
+
+ surfaces = []
+ for source in sources:
+ logger.debug(source.__str__())
+ surf = source.get_source_surface(geom, mesh_size)
+ surfaces.append(surf)
+
+ gmsh.model.occ.synchronize()
+ before = len(gmsh.model.getEntities())
+ logger.debug("Building source union ...")
+ t0 = time()
+ geom.boolean_union(surfaces, False, False)
+ logger.debug("Time for union: %f", time() - t0)
+
+ logger.debug("Synchronize")
+ gmsh.model.occ.synchronize()
+ after = len(gmsh.model.getEntities())
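+            # the boolean union only creates additional entities (intersection
+            # curves and surfaces) if the source meshes cut each other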
+
+ if after - before:
+ logger.debug("Sources intersect")
+ return True
+
+ logger.debug("Sources do not intersect")
+ return False
+
+
+source_names = """
+ TriangleBEMSource
+ DiskBEMSource
+ RingfaultBEMSource
+ RectangularBEMSource
+ CurvedBEMSource
+ """.split()
+
+source_classes = [
+ TriangleBEMSource,
+ DiskBEMSource,
+ RingfaultBEMSource,
+ RectangularBEMSource,
+ CurvedBEMSource,
+]
+
+source_catalog = dict(zip(source_names, source_classes))
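+
+# Minimal usage sketch (illustrative values; requires pygmsh/gmsh and cutde at runtime):
+#
+#   source = DiskBEMSource(depth=2 * km, a_half_axis=1 * km, b_half_axis=0.5 * km, traction=1e6)
+#   dsource = source.discretize_basesource(mesh_size=0.5 * km)
+#   print(dsource.n_triangles, dsource.centroids.shape)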
diff --git a/beat/config.py b/beat/config.py
index 1e19674c..50cb6e26 100644
--- a/beat/config.py
+++ b/beat/config.py
@@ -6,15 +6,18 @@
implemented. Solving the fault geometry, the static distributed slip and the
kinematic distributed slip.
"""
+
import logging
import os
from collections import OrderedDict
+from typing import Dict as TDict
+from typing import List as TList
import numpy as num
from pyrocko import gf, model, trace, util
from pyrocko.cake import load_model
from pyrocko.gf import RectangularSource as PyrockoRS
-from pyrocko.gf.seismosizer import Cloneable, LocalEngine, stf_classes
+from pyrocko.gf.seismosizer import Cloneable
from pyrocko.guts import (
ArgumentError,
Bool,
@@ -29,10 +32,11 @@
dump,
load,
)
-from theano import config as tconfig
+from pytensor import config as tconfig
from beat import utility
from beat.covariance import available_noise_structures, available_noise_structures_2d
+from beat.defaults import default_decimation_factors, defaults
from beat.heart import (
ArrivalTaper,
Filter,
@@ -41,67 +45,40 @@
ReferenceLocation,
_domain_choices,
)
-from beat.sources import MTQTSource, MTSourceWithMagnitude, RectangularSource
+from beat.sources import RectangularSource, stf_catalog
+from beat.sources import source_catalog as geometry_source_catalog
from beat.utility import check_point_keys, list2string
+logger = logging.getLogger("config")
-guts_prefix = "beat"
-logger = logging.getLogger("config")
+try:
+ from beat.bem import source_catalog as bem_source_catalog
-ffi_mode_str = "ffi"
-geometry_mode_str = "geometry"
+ bem_catalog = {"geodetic": bem_source_catalog}
+except ImportError:
+ logger.warning(
+ "To enable 'bem' mode packages 'pygmsh' and 'cutde' need to be installed."
+ )
+ bem_catalog = {}
+ bem_source_catalog = {}
-block_vars = ["bl_azimuth", "bl_amplitude"]
-seis_vars = ["time", "duration"]
+source_catalog = {}
+for catalog in [geometry_source_catalog, bem_source_catalog]:
+ source_catalog.update(catalog)
-source_names = """
- ExplosionSource
- RectangularExplosionSource
- SFSource
- DCSource
- CLVDSource
- MTSource
- MTQTSource
- RectangularSource
- DoubleDCSource
- RingfaultSource
- """.split()
-
-source_classes = [
- gf.ExplosionSource,
- gf.RectangularExplosionSource,
- gf.SFSource,
- gf.DCSource,
- gf.CLVDSource,
- MTSourceWithMagnitude,
- MTQTSource,
- PyrockoRS,
- gf.DoubleDCSource,
- gf.RingfaultSource,
-]
-
-stf_names = """
- Boxcar
- Triangular
- HalfSinusoid
- """.split()
-
-source_catalog = {
- name: source_class for name, source_class in zip(source_names, source_classes)
-}
-stf_catalog = {name: stf_class for name, stf_class in zip(stf_names, stf_classes[1:4])}
+guts_prefix = "beat"
+
+stf_names = stf_catalog.keys()
+all_source_names = list(source_catalog.keys()) + list(bem_source_catalog.keys())
+
+ffi_mode_str = "ffi"
+geometry_mode_str = "geometry"
+bem_mode_str = "bem"
-interseismic_vars = [
- "east_shift",
- "north_shift",
- "strike",
- "dip",
- "length",
- "locking_depth",
-] + block_vars
+seis_vars = ["time", "duration"]
static_dist_vars = ["uparr", "uperp", "utens"]
derived_dist_vars = ["coupling"]
@@ -118,8 +95,6 @@
transd_vars_dist = partial_kinematic_vars + static_dist_vars + voronoi_locations
dist_vars = static_dist_vars + partial_kinematic_vars + derived_dist_vars
-interseismic_catalog = {"geodetic": interseismic_vars}
-
geometry_catalog = {
"polarity": source_catalog,
"geodetic": source_catalog,
@@ -132,7 +107,7 @@
[
[geometry_mode_str, geometry_catalog],
[ffi_mode_str, ffi_catalog],
- ["interseismic", interseismic_catalog],
+ [bem_mode_str, bem_catalog],
]
)
@@ -143,86 +118,12 @@
"RectangularSourcePole": ["magnitude", "coupling"],
}
-
-hyper_name_laplacian = "h_laplacian"
-
-sf_force = (0, 1e10)
-moffdiag = (-1.0, 1.0)
-mdiag = (-num.sqrt(2), num.sqrt(2))
-
-default_bounds = dict(
- east_shift=(-10.0, 10.0),
- north_shift=(-10.0, 10.0),
- depth=(0.0, 5.0),
- strike=(0, 180.0),
- strike1=(0, 180.0),
- strike2=(0, 180.0),
- dip=(45.0, 90.0),
- dip1=(45.0, 90.0),
- dip2=(45.0, 90.0),
- rake=(-90.0, 90.0),
- rake1=(-90.0, 90.0),
- rake2=(-90.0, 90.0),
- length=(5.0, 30.0),
- width=(5.0, 20.0),
- slip=(0.1, 8.0),
- nucleation_x=(-1.0, 1.0),
- nucleation_y=(-1.0, 1.0),
- opening_fraction=(0.0, 0.0),
- magnitude=(4.0, 7.0),
- mnn=mdiag,
- mee=mdiag,
- mdd=mdiag,
- mne=moffdiag,
- mnd=moffdiag,
- med=moffdiag,
- fn=sf_force,
- fe=sf_force,
- fd=sf_force,
- exx=(-200.0, 200.0),
- eyy=(-200.0, 200.0),
- exy=(-200.0, 200.0),
- rotation=(-200.0, 200.0),
- w=(-3.0 / 8.0 * num.pi, 3.0 / 8.0 * num.pi),
- v=(-1.0 / 3, 1.0 / 3.0),
- kappa=(0.0, 2 * num.pi),
- sigma=(-num.pi / 2.0, num.pi / 2.0),
- h=(0.0, 1.0),
- volume_change=(1e8, 1e10),
- diameter=(5.0, 10.0),
- sign=(-1.0, 1.0),
- mix=(0, 1),
- time=(-5.0, 5.0),
- time_shift=(-5.0, 5.0),
- delta_time=(0.0, 10.0),
- delta_depth=(0.0, 10.0),
- distance=(0.0, 10.0),
- duration=(1.0, 30.0),
- peak_ratio=(0.0, 1.0),
- durations=(0.5, 29.5),
- uparr=(-0.05, 6.0),
- uperp=(-0.3, 4.0),
- utens=(0.0, 0.0),
- nucleation_strike=(0.0, 10.0),
- nucleation_dip=(0.0, 7.0),
- velocities=(0.5, 4.2),
- azimuth=(0, 180),
- amplitude=(1e10, 1e20),
- bl_azimuth=(0, 180),
- bl_amplitude=(0.0, 0.1),
- locking_depth=(1.0, 10.0),
- hypers=(-2.0, 6.0),
- ramp=(-0.005, 0.005),
- offset=(-0.05, 0.05),
- lat=(30.0, 30.5),
- lon=(30.0, 30.5),
- omega=(0.5, 0.6),
+derived_variables_mapping.update(
+ {source_name: ["magnitude"] for source_name in bem_source_catalog.keys()}
)
-default_seis_std = 1.0e-6
-default_geo_std = 1.0e-3
-default_decimation_factors = {"polarity": 1, "geodetic": 4, "seismic": 2}
+hyper_name_laplacian = "h_laplacian"
response_file_name = "responses.pkl"
geodetic_data_name = "geodetic_data.pkl"
@@ -234,14 +135,11 @@ def multi_event_seismic_data_name(nevent=0):
if nevent == 0:
return seismic_data_name
else:
- return "seismic_data_subevent_{}.pkl".format(nevent)
+ return f"seismic_data_subevent_{nevent}.pkl"
def multi_event_stations_name(nevent=0):
- if nevent == 0:
- return stations_name
- else:
- return "stations_subevent_{}.txt".format(nevent)
+ return stations_name if nevent == 0 else f"stations_subevent_{nevent}.txt"
linear_gf_dir_name = "linear_gfs"
@@ -262,7 +160,7 @@ def multi_event_stations_name(nevent=0):
_interpolation_choices = ["nearest_neighbor", "multilinear"]
_structure_choices = available_noise_structures()
_structure_choices_2d = available_noise_structures_2d()
-_mode_choices = [geometry_mode_str, ffi_mode_str]
+_mode_choices = [geometry_mode_str, ffi_mode_str, bem_mode_str]
_regularization_choices = ["laplacian", "none"]
_correlation_function_choices = ["nearest_neighbor", "gaussian", "exponential"]
_discretization_choices = ["uniform", "resolution"]
@@ -270,6 +168,7 @@ def multi_event_stations_name(nevent=0):
_backend_choices = ["csv", "bin"]
_datatype_choices = ["geodetic", "seismic", "polarity"]
_sampler_choices = ["PT", "SMC", "Metropolis"]
+_slip_component_choices = ("strike", "dip", "normal")
class InconsistentParameterNaming(Exception):
@@ -300,21 +199,17 @@ def __str__(self):
return "\n%s\n%s" % (self.errmess, self.context)
-class GFConfig(Object):
+class MediumConfig(Object):
"""
- Base config for GreensFunction calculation parameters.
+ Base class for subsurface medium configuration
"""
- store_superdir = String.T(
- default="./",
- help="Absolute path to the directory where Greens Function"
- " stores are located",
- )
reference_model_idx = Int.T(
default=0,
help="Index to velocity model to use for the optimization."
" 0 - reference, 1..n - model of variations",
)
+ sample_rate = Float.T(default=0, optional=True)
n_variations = Tuple.T(
2,
Int.T(),
@@ -323,6 +218,18 @@ class GFConfig(Object):
"Important for the calculation of the model prediction covariance"
" matrix with respect to uncertainties in the velocity model.",
)
+
+
+class GFConfig(MediumConfig):
+ """
+ Base config for layered GreensFunction calculation parameters.
+ """
+
+ store_superdir = String.T(
+ default="./",
+ help="Absolute path to the directory where Greens Function"
+ " stores are located",
+ )
earth_model_name = String.T(
default="ak135-f-continental.f",
help="Name of the reference earthmodel, see "
@@ -467,7 +374,6 @@ class DiscretizationConfig(Object):
class UniformDiscretizationConfig(DiscretizationConfig):
-
patch_widths = List.T(
Float.T(),
default=[5.0],
@@ -585,7 +491,6 @@ class LinearGFConfig(GFConfig):
)
def __init__(self, **kwargs):
-
kwargs = _init_kwargs(
method_config_name="discretization_config",
method_name="discretization",
@@ -620,7 +525,6 @@ class SeismicLinearGFConfig(LinearGFConfig):
)
def __init__(self, **kwargs):
-
Object.__init__(self, **kwargs)
if self.discretization == "resolution":
@@ -667,8 +571,7 @@ class WaveformFitConfig(Object):
interpolation = StringChoice.T(
choices=_interpolation_choices,
default="multilinear",
- help="GF interpolation scheme. Choices: %s"
- % utility.list2string(_interpolation_choices),
+ help=f"GF interpolation scheme. Choices: {utility.list2string(_interpolation_choices)}",
)
arrival_taper = trace.Taper.T(
default=ArrivalTaper.D(),
@@ -686,7 +589,6 @@ class WaveformFitConfig(Object):
class SeismicNoiseAnalyserConfig(Object):
-
structure = StringChoice.T(
choices=_structure_choices,
default="variance",
@@ -700,7 +602,6 @@ class SeismicNoiseAnalyserConfig(Object):
class GeodeticNoiseAnalyserConfig(Object):
-
structure = StringChoice.T(
choices=_structure_choices_2d,
default="import",
@@ -743,14 +644,11 @@ class SeismicConfig(Object):
gf_config = GFConfig.T(default=SeismicGFConfig.D())
def __init__(self, **kwargs):
-
waveforms = "waveforms"
wavenames = kwargs.pop("wavenames", ["any_P"])
wavemaps = []
if waveforms not in kwargs:
- for wavename in wavenames:
- wavemaps.append(WaveformFitConfig(name=wavename))
-
+ wavemaps.extend(WaveformFitConfig(name=wavename) for wavename in wavenames)
kwargs[waveforms] = wavemaps
mode = kwargs.pop("mode", geometry_mode_str)
@@ -779,10 +677,7 @@ def get_hypernames(self):
hids = []
for i, wc in enumerate(self.waveforms):
if wc.include:
- for c in wc.channels:
- hypername = "_".join(("h", wc.name, str(i), c))
- hids.append(hypername)
-
+ hids.extend("_".join(("h", wc.name, str(i), c)) for c in wc.channels)
return hids
def get_station_blacklist(self):
@@ -793,10 +688,7 @@ def get_station_blacklist(self):
return list(set(blacklist))
def get_hierarchical_names(self):
- if self.station_corrections:
- return ["time_shift"]
- else:
- return []
+ return ["time_shift"] if self.station_corrections else []
def init_waveforms(self, wavenames=["any_P"]):
"""
@@ -807,7 +699,6 @@ def init_waveforms(self, wavenames=["any_P"]):
class PolarityGFConfig(NonlinearGFConfig):
-
code = String.T(
default="cake", help="Raytracing code to use for takeoff-angle computations."
)
@@ -827,7 +718,6 @@ class PolarityGFConfig(NonlinearGFConfig):
class PolarityFitConfig(Object):
-
name = String.T(default="any_P", help="Seismic phase name for picked polarities")
include = Bool.T(
default=True, help="Whether to include this FitConfig to the estimation."
@@ -851,7 +741,6 @@ class PolarityFitConfig(Object):
class PolarityConfig(Object):
-
datadir = String.T(default="./")
waveforms = List.T(
PolarityFitConfig.T(default=PolarityFitConfig.D()),
@@ -860,7 +749,6 @@ class PolarityConfig(Object):
gf_config = GFConfig.T(default=PolarityGFConfig.D())
def __init__(self, **kwargs):
-
waveforms = "waveforms"
wavenames = kwargs.pop("wavenames", ["any_P"])
wavemaps = []
@@ -912,7 +800,6 @@ def init_waveforms(self, wavenames=["any_P"]):
class CorrectionConfig(Object):
-
dataset_names = List.T(
String.T(), default=[], help="Datasets to include in the correction."
)
@@ -939,7 +826,6 @@ def check_consistency(self):
class GNSSCorrectionConfig(CorrectionConfig):
-
station_blacklist = List.T(
String.T(), default=[], help="GNSS station names to apply no correction."
)
@@ -948,7 +834,7 @@ class GNSSCorrectionConfig(CorrectionConfig):
)
def get_hierarchical_names(self, name=None, number=0):
- return ["{}_{}".format(number, suffix) for suffix in self.get_suffixes()]
+ return [f"{number}_{suffix}" for suffix in self.get_suffixes()]
class EulerPoleConfig(GNSSCorrectionConfig):
@@ -994,7 +880,7 @@ def feature(self):
def get_hierarchical_names(self, name, number=0):
return [
- "{}_{}".format(name, suffix)
+ f"{name}_{suffix}"
for suffix in self.get_suffixes()
if name in self.dataset_names
]
@@ -1018,17 +904,13 @@ class GeodeticCorrectionsConfig(Object):
def iter_corrections(self):
out_corr = [self.ramp]
- for euler_pole_conf in self.euler_poles:
- out_corr.append(euler_pole_conf)
-
- for strain_conf in self.strain_rates:
- out_corr.append(strain_conf)
-
+ out_corr.extend(iter(self.euler_poles))
+ out_corr.extend(iter(self.strain_rates))
return out_corr
@property
def has_enabled_corrections(self):
- return any([corr.enabled for corr in self.iter_corrections()])
+ return any(corr.enabled for corr in self.iter_corrections())
class DatasetConfig(Object):
@@ -1051,7 +933,6 @@ def load_data(self):
class GNSSDatasetConfig(DatasetConfig):
-
components = List.T(String.T(), default=["north", "east", "up"])
blacklist = List.T(
String.T(),
@@ -1064,7 +945,7 @@ def load_data(self, campaign=False):
all_targets = []
for filename in self.names:
- logger.info("Loading file %s ..." % filename)
+ logger.info(f"Loading file {filename} ...")
try:
targets = load_and_blacklist_gnss(
self.datadir,
@@ -1074,14 +955,14 @@ def load_data(self, campaign=False):
components=self.components,
)
if targets:
- logger.info("Successfully loaded GNSS data from file %s" % filename)
+ logger.info(f"Successfully loaded GNSS data from file {filename}")
if campaign:
all_targets.append(targets)
else:
all_targets.extend(targets)
except OSError:
logger.warning(
- "GNSS of file %s not conform with ascii format!" % filename
+ f"GNSS of file {filename} not conform with ascii format!"
)
return all_targets
@@ -1120,10 +1001,9 @@ class GeodeticConfig(Object):
"If false one hyperparameter for each DATATYPE and "
"displacement COMPONENT.",
)
- gf_config = GFConfig.T(default=GeodeticGFConfig.D())
+ gf_config = MediumConfig.T(default=GeodeticGFConfig.D())
def __init__(self, **kwargs):
-
mode = kwargs.pop("mode", geometry_mode_str)
if mode == geometry_mode_str:
@@ -1140,7 +1020,6 @@ def get_hypernames(self):
return ["_".join(("h", typ)) for typ in self.types]
def get_hierarchical_names(self, datasets=None):
-
out_names = []
for number, corr_conf in enumerate(self.corrections_config.iter_corrections()):
if corr_conf.enabled:
@@ -1213,12 +1092,10 @@ def _init_kwargs(method_config_name, method_name, method_catalog, kwargs):
if method and not method_config:
kwargs[method_config_name] = method_catalog[method]()
- elif method and method_config:
+ elif method:
wanted_config = method_catalog[method]
if not isinstance(method_config, wanted_config):
- logger.info(
- "%s method changed!" " Initializing new config..." % method_name
- )
+ logger.info(f"{method_name} method changed! Initializing new config...")
kwargs[method_config_name] = wanted_config()
else:
kwargs[method_config_name] = method_config
@@ -1230,7 +1107,6 @@ def _init_kwargs(method_config_name, method_name, method_catalog, kwargs):
class FFIConfig(ModeConfig):
-
regularization = StringChoice.T(
default="none",
choices=_regularization_choices,
@@ -1266,7 +1142,6 @@ class FFIConfig(ModeConfig):
)
def __init__(self, **kwargs):
-
kwargs = _init_kwargs(
method_config_name="regularization_config",
method_name="regularization",
@@ -1277,6 +1152,70 @@ def __init__(self, **kwargs):
Object.__init__(self, **kwargs)
+class BoundaryCondition(Object):
+ slip_component = StringChoice.T(
+ choices=_slip_component_choices,
+ default="normal",
+ help=f"Slip-component for Green's Function calculation, maybe {list2string(_slip_component_choices)} ",
+ )
+ source_idxs = List.T(
+ Int.T(),
+ default=[0],
+ help="Indices for the sources that are causing the stress.",
+ )
+ receiver_idxs = List.T(
+ Int.T(), default=[0], help="Indices for the sources that receive the stress."
+ )
+
+
+class BoundaryConditions(Object):
+ conditions = Dict.T(
+ String.T(),
+ BoundaryCondition.T(),
+ default={
+ "strike": BoundaryCondition.D(slip_component="strike"),
+ "dip": BoundaryCondition.D(slip_component="dip"),
+ "normal": BoundaryCondition.D(slip_component="normal"),
+ },
+ )
+
+ def iter_conditions(self):
+ yield from self.conditions.values()
+
+ def get_traction_field(self, discretized_sources):
+ if len(self.conditions) != 3:
+ raise ValueError(
+ "One boundary condition for each slip component needs to be defined."
+ )
+
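+        # stack the prescribed tractions receiver-wise in fixed slip-component
+        # order: strike, dip, normal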
+ traction_vecs = []
+ for slip_comp in _slip_component_choices:
+ bcond = self.conditions[slip_comp]
+ for receiver_idx in bcond.receiver_idxs:
+ receiver_mesh = discretized_sources[receiver_idx]
+ t_vec = receiver_mesh.get_traction_vector(slip_comp)
+ traction_vecs.append(t_vec)
+
+ return num.hstack(traction_vecs)
+
+
+class BEMConfig(MediumConfig):
+ poissons_ratio = Float.T(default=0.25, help="Poisson's ratio")
+ shear_modulus = Float.T(default=33e9, help="Shear modulus [Pa]")
+ earth_model_name = String.T(default="homogeneous-elastic-halfspace")
+ mesh_size = Float.T(
+ default=0.5,
+ help="Determines the size of triangles [km], the smaller the finer the discretization.",
+ )
+ check_mesh_intersection = Bool.T(
+ default=True, help="If meshes intersect reject sample."
+ )
+ boundary_conditions = BoundaryConditions.T(
+ default=BoundaryConditions.D(),
+ help="Boundary conditions for the interaction matrix and imposed traction field.",
+ )
+
+
def get_parameter(variable, nvars=1, lower=1, upper=2):
return Parameter(
name=variable,
@@ -1288,6 +1227,115 @@ def get_parameter(variable, nvars=1, lower=1, upper=2):
)
+class DatatypeParameterMapping(Object):
+ sources_variables = List.T(Dict.T(String.T(), Int.T()))
+ n_sources = Int.T()
+
+ def __init__(self, **kwargs):
+ Object.__init__(self, **kwargs)
+
+ self._mapping = None
+ self.point_to_sources_mapping()
+
+ def __getitem__(self, k):
+ if self._mapping is None:
+ self.point_to_sources_mapping()
+
+ if k not in self._mapping.keys():
+            raise KeyError(f"Parameter mapping does not contain parameter: {k}")
+
+ return self._mapping[k]
+
+ def point_to_sources_mapping(self) -> TDict[str, TList[int]]:
+ """
+        Mapping for mixed source setups: maps each source parameter name to the
+        indices of the sources it belongs to. Used by utility.split_point to
+        split the full point into subsource points.
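+
+        E.g. sources_variables=[{"east_shift": 1}, {"east_shift": 2}] yields
+        {"east_shift": [0, 1, 2]}.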
+ """
+ if self._mapping is None:
+ start_idx = 0
+ total_variables = {}
+ for source_variables in self.sources_variables:
+ for variable, size in source_variables.items():
+ end_idx = size + start_idx
+ source_idxs = list(range(start_idx, end_idx))
+ if variable in total_variables:
+ total_variables[variable].extend(source_idxs)
+ else:
+ total_variables[variable] = source_idxs
+
+ start_idx += size
+
+ self._mapping = total_variables
+
+ return self._mapping
+
+    def point_variable_names(self) -> TList[str]:
+        return list(self.point_to_sources_mapping().keys())
+
+ def total_variables_sizes(self) -> TDict[str, int]:
+ mapping = self.point_to_sources_mapping()
+ variables_sizes = {}
+ for variable, idxs in mapping.items():
+ variables_sizes[variable] = len(idxs)
+
+ return variables_sizes
+
+
+class SourcesParameterMapping(Object):
+ """
+    Mapping of source parameters to the variables of the sampling point.
+ """
+
+ source_types = List.T(String.T(), default=[])
+ n_sources = List.T(Int.T(), default=[])
+ datatypes = List.T(StringChoice.T(choices=_datatype_choices), default=[])
+ mappings = Dict.T(String.T(), DatatypeParameterMapping.T())
+
+ def __init__(self, **kwargs):
+ Object.__init__(self, **kwargs)
+
+ for datatype in self.datatypes:
+ self.mappings[datatype] = None
+
+ def add(self, sources_variables: TDict = {}, datatype: str = "geodetic"):
+ if datatype in self.mappings:
+ self.mappings[datatype] = DatatypeParameterMapping(
+ sources_variables=sources_variables, n_sources=sum(self.n_sources)
+ )
+ else:
+ raise ValueError(
+ "Datatype for the source mapping has not been initialized!"
+ )
+
+ def __getitem__(self, k):
+ if k not in self.mappings.keys():
+ raise KeyError(k)
+
+ return self.mappings[k]
+
+ def unique_variables_sizes(self) -> TDict[str, int]:
+ """
+ Combine source specific variable dicts into a common setup dict
+
+ Raises:
+ ValueError: if no source specific dicts exist
+
+ Returns:
+ Dict: of variable names and their combined sizes
+ """
+
+ if len(self.mappings) == 0:
+ raise ValueError(
+ "Mode and datatype combination not implemented"
+ " or not resolvable with given datatypes."
+ )
+ unique_variables = {}
+ for datatype_parameter_mapping in self.mappings.values():
+ unique_variables.update(datatype_parameter_mapping.total_variables_sizes())
+
+ return unique_variables
+
+
class ProblemConfig(Object):
"""
Config for optimization problem to setup.
@@ -1301,11 +1349,13 @@ class ProblemConfig(Object):
mode_config = ModeConfig.T(
optional=True, help="Global optimization mode specific parameters."
)
- source_type = StringChoice.T(
- default="RectangularSource",
- choices=source_names,
- help="Source type to optimize for. Choices: %s"
- % (", ".join(name for name in source_names)),
+ source_types = List.T(
+ StringChoice.T(
+ default="RectangularSource",
+ choices=all_source_names,
+ help="Source types to optimize for. BEMSources and Sources cannot be mixed. Choices: %s"
+ % (", ".join(name for name in all_source_names)),
+ ),
)
stf_type = StringChoice.T(
default="HalfSinusoid",
@@ -1318,7 +1368,9 @@ class ProblemConfig(Object):
optional=True,
help="Determines the reduction of discretization of an extended" " source.",
)
- n_sources = Int.T(default=1, help="Number of Sub-sources to solve for")
+ n_sources = List.T(
+ Int.T(), default=[1], help="List of number of sub-sources for each source-type"
+ )
datatypes = List.T(default=["geodetic"])
hyperparameters = Dict.T(
default=OrderedDict(),
@@ -1335,19 +1387,18 @@ class ProblemConfig(Object):
)
def __init__(self, **kwargs):
-
mode = "mode"
- mode_config = "mode_config"
if mode in kwargs:
omode = kwargs[mode]
if omode == ffi_mode_str:
+ mode_config = "mode_config"
if mode_config not in kwargs:
kwargs[mode_config] = FFIConfig()
Object.__init__(self, **kwargs)
- def init_vars(self, variables=None, nvars=None):
+ def init_vars(self, variables=None, sizes=None):
"""
Initiate priors based on the problem mode and datatypes.
@@ -1357,21 +1408,12 @@ def init_vars(self, variables=None, nvars=None):
of str of variable names to initialise
"""
if variables is None:
- variables = self.select_variables()
+ mapping = self.get_variables_mapping()
self.priors = OrderedDict()
-
- for variable in variables:
-
- if nvars is None:
- if variable in block_vars:
- nvars = 1
- else:
- nvars = self.n_sources
-
- lower = default_bounds[variable][0]
- upper = default_bounds[variable][1]
- self.priors[variable] = get_parameter(variable, nvars, lower, upper)
+ for variable, size in mapping.unique_variables_sizes().items():
+ lower, upper = defaults[variable].default_bounds
+ self.priors[variable] = get_parameter(variable, size, lower, upper)
def set_vars(self, bounds_dict, attribute="priors", init=False):
"""
@@ -1381,9 +1423,7 @@ def set_vars(self, bounds_dict, attribute="priors", init=False):
upd_dict = getattr(self, attribute)
if variable in list(upd_dict.keys()) or init:
if init:
- logger.info(
- 'Initialising new variable "%s" in %s' % (variable, attribute)
- )
+ logger.info(f"Initialising new variable {variable} in {attribute}")
param = get_parameter(variable, nvars=len(bounds[0]))
upd_dict[variable] = param
else:
@@ -1403,58 +1443,65 @@ def set_vars(self, bounds_dict, attribute="priors", init=False):
setattr(self, attribute, upd_dict)
- def select_variables(self):
+ def get_variables_mapping(self):
"""
Return model variables depending on problem config.
"""
if self.mode not in modes_catalog.keys():
- raise ValueError("Problem mode %s not implemented" % self.mode)
+ raise ValueError(f"Problem mode {self.mode} not implemented")
vars_catalog = modes_catalog[self.mode]
-
- variables = []
for datatype in self.datatypes:
- if datatype in vars_catalog.keys():
- if self.mode == geometry_mode_str:
- if self.source_type in vars_catalog[datatype].keys():
- source = vars_catalog[datatype][self.source_type]
- svars = set(source.keys())
-
- if isinstance(source(), (PyrockoRS, gf.ExplosionSource)):
- svars.discard("magnitude")
+ if datatype not in vars_catalog.keys():
+ raise ValueError(
+ f"""Datatype {datatype} not supported for type of problem!
+                    Supported datatypes are: {list2string(vars_catalog.keys())}"""
+ )
- variables += utility.weed_input_rvs(svars, self.mode, datatype)
- else:
+ mapping = SourcesParameterMapping(
+ source_types=self.source_types,
+ datatypes=self.datatypes,
+ n_sources=self.n_sources,
+ )
+ for datatype in self.datatypes:
+ if self.mode in [geometry_mode_str, bem_mode_str]:
+ list_variables = []
+ for source_type, n_source in zip(self.source_types, self.n_sources):
+ variables = {}
+ supported_sources = vars_catalog[datatype].keys()
+ if source_type not in supported_sources:
raise ValueError(
- "Source Type not supported for type"
- " of problem, and datatype!"
+ f"Source Type {source_type} not supported for type"
+ f" of problem, and datatype '{datatype}'!"
+ f" Supported sources: {list2string(supported_sources)}"
)
- if datatype == "seismic":
- if self.stf_type in stf_catalog.keys():
- stf = stf_catalog[self.stf_type]
- variables += utility.weed_input_rvs(
- set(stf.keys()), self.mode, datatype
- )
- else:
- variables += vars_catalog[datatype]
- else:
- raise ValueError(
- "Datatype %s not supported for type of"
- " problem! Supported datatype are: %s"
- % (datatype, ", ".join('"%s"' % d for d in vars_catalog.keys()))
- )
+ source = vars_catalog[datatype][source_type]
+ if datatype == "seismic" and self.stf_type in stf_catalog.keys():
+ stf = stf_catalog[self.stf_type]
+ else:
+ stf = {}
- unique_variables = utility.unique_list(variables)
+ source_varnames = set(list(source.keys()) + list(stf.keys()))
+ if isinstance(source(), (PyrockoRS, gf.ExplosionSource)):
+ source_varnames.discard("magnitude")
- if len(unique_variables) == 0:
- raise Exception(
- "Mode and datatype combination not implemented"
- " or not resolvable with given datatypes."
- )
+ for varname in source_varnames:
+ variables[varname] = n_source
- return unique_variables
+ variables = utility.weed_input_rvs(variables, self.mode, datatype)
+ list_variables.append(variables)
+
+ mapping.add(list_variables, datatype=datatype)
+ else:
+ variables = {}
+ for varname in vars_catalog[datatype]:
+ variables[varname] = self.n_sources[0]
+
+ mapping.add([variables], datatype=datatype)
+
+ return mapping
def get_random_variables(self):
"""
@@ -1463,41 +1510,32 @@ def get_random_variables(self):
Returns
-------
rvs : dict
- variable random variables
+ random variable names and their kwargs
fixed_params : dict
fixed random parameters
"""
- from pymc3 import Uniform
- logger.debug("Optimization for %i sources", self.n_sources)
+ logger.debug("Optimization for %s sources", list2string(self.n_sources))
- rvs = dict()
- fixed_params = dict()
+ rvs = {}
+ fixed_params = {}
for param in self.priors.values():
if not num.array_equal(param.lower, param.upper):
-
- shape = self.get_parameter_shape(param)
+ size = self.get_parameter_size(param)
kwargs = dict(
name=param.name,
- shape=num.sum(shape),
- lower=param.get_lower(shape),
- upper=param.get_upper(shape),
- testval=param.get_testvalue(shape),
+ shape=(num.sum(size),),
+ lower=param.get_lower(size),
+ upper=param.get_upper(size),
+ initval=param.get_testvalue(size),
transform=None,
dtype=tconfig.floatX,
)
- try:
- rvs[param.name] = Uniform(**kwargs)
-
- except TypeError:
- kwargs.pop("name")
- rvs[param.name] = Uniform.dist(**kwargs)
-
+ rvs[param.name] = kwargs
else:
logger.info(
- "not solving for %s, got fixed at %s"
- % (param.name, utility.list2string(param.lower.flatten()))
+ f"not solving for {param.name}, got fixed at {utility.list2string(param.lower.flatten())}"
)
fixed_params[param.name] = param.lower
@@ -1519,10 +1557,11 @@ def set_decimation_factor(self):
Determines the reduction of discretization of an extended source.
Influences yet only the RectangularSource.
"""
- if self.source_type == "RectangularSource":
- self.decimation_factors = {}
- for datatype in self.datatypes:
- self.decimation_factors[datatype] = default_decimation_factors[datatype]
+ if "RectangularSource" in self.source_types:
+ self.decimation_factors = {
+ datatype: default_decimation_factors[datatype]
+ for datatype in self.datatypes
+ }
else:
self.decimation_factors = None
@@ -1537,8 +1576,8 @@ def _validate_parameters(self, dict_name=None):
"""
d = getattr(self, dict_name)
- double_check = []
if d is not None:
+ double_check = []
for name, param in d.items():
param.validate_bounds()
if name not in double_check:
@@ -1546,13 +1585,11 @@ def _validate_parameters(self, dict_name=None):
raise InconsistentParameterNaming(name, param.name, self.mode)
double_check.append(name)
else:
- raise ValueError(
- "Parameter %s not unique in %s!".format(name, dict_name)
- )
+ raise ValueError("Parameter %s not unique in %s!".format())
- logger.info("All {} ok!".format(dict_name))
+ logger.info(f"All {dict_name} ok!")
else:
- logger.info("No {} defined!".format(dict_name))
+ logger.info(f"No {dict_name} defined!")
def validate_all(self):
"""
@@ -1586,8 +1623,8 @@ def get_test_point(self):
"""
test_point = {}
for varname, var in self.priors.items():
- shape = self.get_parameter_shape(var)
- test_point[varname] = var.get_testvalue(shape)
+ size = self.get_parameter_size(var)
+ test_point[varname] = var.get_testvalue(size)
for varname, var in self.hyperparameters.items():
test_point[varname] = var.get_testvalue()
@@ -1597,62 +1634,68 @@ def get_test_point(self):
return test_point
- def get_parameter_shape(self, param):
- if self.mode == ffi_mode_str:
- if param.name in hypo_vars:
- shape = self.n_sources
- elif param.name not in hypo_vars and self.mode_config.npatches:
- shape = self.mode_config.subfault_npatches
- if len(shape) == 0:
- shape = self.mode_config.npatches
- else:
- shape = param.dimension
+ def get_parameter_size(self, param):
+ if self.mode == ffi_mode_str and param.name in hypo_vars:
+ size = self.n_sources[0]
+ elif self.mode == ffi_mode_str and self.mode_config.npatches:
+ size = self.mode_config.subfault_npatches
+ if len(size) == 0:
+ size = self.mode_config.npatches
+ elif self.mode in [ffi_mode_str, geometry_mode_str, bem_mode_str]:
+ size = param.dimension
- elif self.mode == geometry_mode_str:
- shape = param.dimension
else:
- raise TypeError("Mode not implemented: %s" % self.mode)
+ raise TypeError(f"Mode not implemented: {self.mode}")
- return shape
+ return size
def get_derived_variables_shapes(self):
+ """
+ Get variable names and shapes of derived variables of the problem.
- source_type = self.source_type
+ Returns:
+ list: varnames
+ list: of tuples of ints (shapes)
+ """
tpoint = self.get_test_point()
has_pole, _ = check_point_keys(tpoint, phrase="*_pole_lat")
- if has_pole:
- source_type += "Pole"
+ derived = {}
+ for source_type, n_source in zip(self.source_types, self.n_sources):
+ if has_pole:
+ source_type += "Pole"
- try:
- varnames = derived_variables_mapping[source_type]
- shapes = []
- for varname in varnames:
- if self.mode == geometry_mode_str:
- shape = (self.n_sources,)
- elif self.mode == ffi_mode_str:
- if varname == "magnitude":
- shape = (1,)
+ try:
+ shapes = []
+ source_varnames = derived_variables_mapping[source_type]
+ for varname in source_varnames:
+ if self.mode in [geometry_mode_str, bem_mode_str]:
+ shape = n_source
+ elif self.mode == ffi_mode_str:
+ shape = (
+ 1 if varname == "magnitude" else self.mode_config.npatches
+ )
else:
- shape = (self.mode_config.npatches,)
+ raise ValueError("Mode '%s' is not supported!" % self.mode)
- shapes.append(shape)
+ if varname in derived:
+ derived[varname] += shape
+ else:
+ derived[varname] = shape
- logger.info(
- "Adding derived variables %s with shapes %s to "
- "trace." % (list2string(varnames), list2string(shapes))
- )
- except KeyError:
- logger.info("No derived variables for %s" % source_type)
- varnames = []
- shapes = []
+ except KeyError:
+ logger.info(f"No derived variables for {source_type}")
+ shapes = [(shape,) for shape in derived.values()]
+ varnames = list(derived.keys())
+ logger.info(
+ f"Adding derived variables {list2string(varnames)} with shapes {list2string(shapes)} to trace."
+ )
return varnames, shapes
class SamplerParameters(Object):
-
tune_interval = Int.T(
default=50, help="Tune interval for adaptive tuning of Metropolis step size."
)
@@ -1670,7 +1713,6 @@ class SamplerParameters(Object):
class ParallelTemperingConfig(SamplerParameters):
-
n_samples = Int.T(
default=int(1e5),
help="Number of samples of the posterior distribution."
@@ -1823,7 +1865,6 @@ class SamplerConfig(Object):
)
def __init__(self, **kwargs):
-
kwargs = _init_kwargs(
method_config_name="parameters",
method_name="name",
@@ -1872,11 +1913,10 @@ class SeismicGFLibraryConfig(GFLibaryConfig):
@property
def _mapid(self):
- if hasattr(self, "mapnumber"):
- if self.mapnumber is not None:
- return "_".join((self.wave_config.name, str(self.mapnumber)))
- else:
+ if not hasattr(self, "mapnumber"):
return self.wave_config.name
+ if self.mapnumber is not None:
+ return "_".join((self.wave_config.name, str(self.mapnumber)))
datatype_catalog = {
@@ -1919,27 +1959,26 @@ def update_hypers(self):
hypernames = []
for datatype in _datatype_choices:
- datatype_conf = getattr(self, "%s_config" % datatype)
+ datatype_conf = getattr(self, f"{datatype}_config")
if datatype_conf is not None:
hypernames.extend(datatype_conf.get_hypernames())
- if self.problem_config.mode == ffi_mode_str:
- if self.problem_config.mode_config.regularization == "laplacian":
- hypernames.append(hyper_name_laplacian)
+ if (
+ self.problem_config.mode == ffi_mode_str
+ and self.problem_config.mode_config.regularization == "laplacian"
+ ):
+ hypernames.append(hyper_name_laplacian)
hypers = OrderedDict()
+ defaultb_name = "hypers"
for name in hypernames:
- logger.info("Added hyperparameter %s to config and " "model setup!" % name)
-
- defaultb_name = "hypers"
+ logger.info(f"Added hyperparameter {name} to config and model setup!")
+ lower, upper = defaults[defaultb_name].default_bounds
hypers[name] = Parameter(
name=name,
- lower=num.ones(1, dtype=tconfig.floatX)
- * default_bounds[defaultb_name][0],
- upper=num.ones(1, dtype=tconfig.floatX)
- * default_bounds[defaultb_name][1],
- testvalue=num.ones(1, dtype=tconfig.floatX)
- * num.mean(default_bounds[defaultb_name]),
+ lower=num.ones(1, dtype=tconfig.floatX) * lower,
+ upper=num.ones(1, dtype=tconfig.floatX) * upper,
+ testvalue=num.ones(1, dtype=tconfig.floatX) * num.mean([lower, upper]),
)
self.problem_config.hyperparameters = hypers
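Hyper-parameter bounds now come from the defaults registry instead of the removed default_bounds dict. A short sketch, assuming the patched package is importable, of how the test value is derived from those bounds:

    import numpy as num
    from beat.defaults import defaults

    lower, upper = defaults["hypers"].default_bounds
    testvalue = num.ones(1) * num.mean([lower, upper])
    print(lower, upper, testvalue)   # -2.0 6.0 [2.]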
@@ -1978,7 +2017,7 @@ def update_hierarchicals(self):
shp = 1
for name in hierarnames:
logger.info(
- "Added hierarchical parameter %s to config and " "model setup!" % name
+ f"Added hierarchical parameter {name} to config and model setup!"
)
if name == "time_shift":
@@ -1987,14 +2026,13 @@ def update_hierarchicals(self):
correction_name = name.split("_")[-1]
defaultb_name = correction_name
+ lower, upper = defaults[defaultb_name].default_bounds
hierarchicals[name] = Parameter(
name=name,
- lower=num.ones(shp, dtype=tconfig.floatX)
- * default_bounds[defaultb_name][0],
- upper=num.ones(shp, dtype=tconfig.floatX)
- * default_bounds[defaultb_name][1],
+ lower=num.ones(shp, dtype=tconfig.floatX) * lower,
+ upper=num.ones(shp, dtype=tconfig.floatX) * upper,
testvalue=num.ones(shp, dtype=tconfig.floatX)
- * num.mean(default_bounds[defaultb_name]),
+ * num.mean([lower, upper]),
)
self.problem_config.hierarchicals = hierarchicals
@@ -2024,8 +2062,7 @@ def init_reference_sources(source_points, n_sources, source_type, stf_type, even
rf = RectangularSource(stf=stf, anchor="top")
utility.update_source(rf, **source_points[i])
else:
- kwargs = {}
- kwargs["stf"] = stf
+ kwargs = {"stf": stf}
rf = RectangularSource.from_kite_source(source_points[i], kwargs=kwargs)
rf.nucleation_x = None
@@ -2050,8 +2087,8 @@ def init_config(
main_path="./",
datatypes=["geodetic"],
mode="geometry",
- source_type="RectangularSource",
- n_sources=1,
+ source_types=["RectangularSource"],
+ n_sources=[1],
waveforms=["any_P"],
sampler="SMC",
hyper_sampler="Metropolis",
@@ -2096,46 +2133,43 @@ def init_config(
:class:`BEATconfig`
"""
- def init_dataset_config(config, datatype):
-
+ def init_dataset_config(config, datatype, mode):
dconfig = datatype_catalog[datatype]()
- if hasattr(dconfig.gf_config, "reference_location"):
- if not individual_gfs:
- dconfig.gf_config.reference_location = ReferenceLocation(
- lat=10.0, lon=10.0
- )
- else:
- dconfig.gf_config.reference_location = None
+ if mode == bem_mode_str:
+ dconfig.gf_config = BEMConfig()
+ else:
+ if hasattr(dconfig.gf_config, "reference_location"):
+ if not individual_gfs:
+ dconfig.gf_config.reference_location = ReferenceLocation(
+ lat=10.0, lon=10.0
+ )
+ else:
+ dconfig.gf_config.reference_location = None
- if use_custom:
- logger.info(
- "use_custom flag set! The velocity model in the"
- " %s GF configuration has to be updated!" % datatype
- )
- dconfig.gf_config.custom_velocity_model = load_model().extract(
- depth_max=100.0 * km
- )
- dconfig.gf_config.use_crust2 = False
- dconfig.gf_config.replace_water = False
+ if use_custom:
+ logger.info(
+ "use_custom flag set! The velocity model in the"
+ " %s GF configuration has to be updated!" % datatype
+ )
+ dconfig.gf_config.custom_velocity_model = load_model().extract(
+ depth_max=100.0 * km
+ )
+ dconfig.gf_config.use_crust2 = False
+ dconfig.gf_config.replace_water = False
- config["%s_config" % datatype] = dconfig
+ config[f"{datatype}_config"] = dconfig
return config
c = BEATconfig(name=name, date=date)
c.project_dir = os.path.join(os.path.abspath(main_path), name)
- if mode == geometry_mode_str or mode == "interseismic":
- if date is not None and not mode == "interseismic":
- c.event = utility.search_catalog(date=date, min_magnitude=min_magnitude)
+ if mode in [geometry_mode_str, bem_mode_str]:
+ for datatype in datatypes:
+ init_dataset_config(c, datatype=datatype, mode=mode)
- elif mode == "interseismic":
- c.event = model.Event(lat=10.0, lon=10.0, depth=0.0)
- c.date = "dummy"
- logger.info(
- "Interseismic mode! Using event as reference for the"
- " stable block! Please update coordinates!"
- )
+ if date is not None and mode != bem_mode_str:
+ c.event = utility.search_catalog(date=date, min_magnitude=min_magnitude)
else:
logger.warn(
"No given date! Using dummy event!"
@@ -2145,12 +2179,11 @@ def init_dataset_config(config, datatype):
c.event = model.Event(duration=1.0)
c.date = "dummy"
- for datatype in datatypes:
- init_dataset_config(c, datatype=datatype)
-
elif mode == ffi_mode_str:
+ if len(source_types) > 1:
+ raise TypeError("FFI is not supported with mixed source types, yet.")
- if source_type != "RectangularSource":
+ if "RectangularSource" not in source_types:
raise TypeError(
"Distributed slip is so far only supported" " for RectangularSource(s)"
)
@@ -2165,24 +2198,24 @@ def init_dataset_config(config, datatype):
" stores for the non-linear problem." % geometry_mode_str
)
+ geometry_source_type = gmc.problem_config.source_types[0]
logger.info("Taking information from geometry_config ...")
- if source_type != gmc.problem_config.source_type:
+ if source_types[0] != geometry_source_type:
raise ValueError(
'Specified reference source: "%s" differs from the'
" source that has been used previously in"
- ' "geometry" mode: "%s"!'
- % (source_type, gmc.problem_config.source_type)
+ ' "geometry" mode: "%s"!' % (source_types[0], geometry_source_type)
)
n_sources = gmc.problem_config.n_sources
point = {k: v.testvalue for k, v in gmc.problem_config.priors.items()}
point = utility.adjust_point_units(point)
- source_points = utility.split_point(point)
+ source_points = utility.split_point(point, n_sources_total=n_sources[0])
reference_sources = init_reference_sources(
source_points,
- n_sources,
- gmc.problem_config.source_type,
+ n_sources[0],
+ geometry_source_type,
gmc.problem_config.stf_type,
event=gmc.event,
)
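The explicit n_sources_total argument to utility.split_point is new; the sketch below is not the actual utility implementation, it only illustrates the intent of splitting one array-valued solution point into per-source dicts:

    import numpy as num

    point = {"east_shift": num.array([1.0, 2.0]), "depth": num.array([3.0, 8.0])}

    def split_point_sketch(point, n_sources_total):
        # one dict of scalars per source
        return [{k: v[i] for k, v in point.items()} for i in range(n_sources_total)]

    print(split_point_sketch(point, n_sources_total=2))
    # [{'east_shift': 1.0, 'depth': 3.0}, {'east_shift': 2.0, 'depth': 8.0}]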
@@ -2237,7 +2270,7 @@ def init_dataset_config(config, datatype):
c.seismic_config.gf_config = lgf_config
c.problem_config = ProblemConfig(
- n_sources=n_sources, datatypes=datatypes, mode=mode, source_type=source_type
+ n_sources=n_sources, datatypes=datatypes, mode=mode, source_types=source_types
)
c.problem_config.init_vars()
c.problem_config.set_decimation_factor()
@@ -2266,7 +2299,7 @@ def dump_config(config):
----------
config : :class:`BEATConfig`
"""
- config_file_name = "config_" + config.problem_config.mode + ".yaml"
+ config_file_name = f"config_{config.problem_config.mode}.yaml"
conf_out = os.path.join(config.project_dir, config_file_name)
dump(config, filename=conf_out)
@@ -2289,14 +2322,14 @@ def load_config(project_dir, mode):
-------
:class:`BEATconfig`
"""
- config_file_name = "config_" + mode + ".yaml"
+ config_file_name = f"config_{mode}.yaml"
config_fn = os.path.join(project_dir, config_file_name)
try:
config = load(filename=config_fn)
except IOError:
- raise IOError("Cannot load config, file %s" " does not exist!" % config_fn)
+ raise IOError(f"Cannot load config, file {config_fn} does not exist!")
except (ArgumentError, TypeError):
raise ConfigNeedsUpdatingError()
diff --git a/beat/covariance.py b/beat/covariance.py
index 71167383..d2b1da9f 100644
--- a/beat/covariance.py
+++ b/beat/covariance.py
@@ -2,15 +2,13 @@
from time import time
import numpy as num
-from pymc3 import Point
from pyrocko import gf, trace
+from pytensor import config as tconfig
from scipy.linalg import toeplitz
from scipy.spatial import KDTree
-from theano import config as tconfig
from beat import heart
-from beat.utility import ensure_cov_psd, list2string, running_window_rms, distances
-
+from beat.utility import distances, ensure_cov_psd, list2string, running_window_rms
logger = logging.getLogger("covariance")
@@ -43,7 +41,7 @@ def exponential_data_covariance(n, dt, tzero):
Notes
-----
Cd(i,j) = (Variance of trace)*exp(-abs(ti-tj)/
- (shortest period T0 of waves))
+ (shortest period T0 of waves))
i,j are samples of the seismic trace
"""
@@ -170,13 +168,12 @@ def __init__(
config,
events=None,
):
-
avail = available_noise_structures_2d()
if config.structure not in avail:
raise AttributeError(
'Selected noise structure "%s" not supported! Implemented'
- " noise structures: %s" % (structure, list2string(avail))
+ " noise structures: %s" % (config.structure, list2string(avail))
)
self.events = events
@@ -194,7 +191,6 @@ def do_import(self, dataset):
)
def do_non_toeplitz(self, dataset, result):
-
if dataset.typ == "SAR":
dataset.update_local_coords(self.events[0])
coords = num.vstack([dataset.east_shifts, dataset.north_shifts]).T
@@ -265,7 +261,6 @@ def __init__(
sources=None,
chop_bounds=["b", "c"],
):
-
avail = available_noise_structures()
if structure not in avail:
raise AttributeError(
@@ -281,7 +276,6 @@ def __init__(
self.chop_bounds = chop_bounds
def get_structure(self, wmap, chop_bounds=None):
-
if chop_bounds is None:
chop_bounds = self.chop_bounds
@@ -298,7 +292,6 @@ def get_structure(self, wmap, chop_bounds=None):
return NoiseStructureCatalog[self.structure](n, dsample, tzero)
def do_import(self, wmap):
-
scalings = []
for tr, target in zip(wmap.datasets, wmap.targets):
scaling = import_data_covariance(
@@ -312,7 +305,6 @@ def do_import(self, wmap):
return scalings
def do_non_toeplitz(self, wmap, results):
-
if results is None:
             raise ValueError(
"Results need(s) to be given for non-toeplitz" " covariance estimates!"
@@ -333,7 +325,6 @@ def do_non_toeplitz(self, wmap, results):
return scalings
def do_variance_estimate(self, wmap, chop_bounds=None):
-
filterer = wmap.config.filterer
scalings = []
@@ -438,6 +429,8 @@ def get_data_covariances(self, wmap, sample_rate, results=None, chop_bounds=None
def model_prediction_sensitivity(engine, *args, **kwargs):
"""
+ DEPRECATED!
+
Calculate the model prediction Covariance Sensitivity Kernel.
(numerical derivation with respect to the input source parameter(s))
Following Duputel et al. 2014
@@ -515,36 +508,34 @@ def model_prediction_sensitivity(engine, *args, **kwargs):
sources=calc_sources, targets=request.targets, nprocs=nprocs
)
- for k in range(len(request.targets)):
+ for i_k in range(len(request.targets)):
# zero padding if necessary
trc_lengths = num.array(
[
- len(response.results_list[i][k].trace.data)
+ len(response.results_list[i][i_k].trace.data)
for i in range(len(response.results_list))
]
)
Id = num.where(trc_lengths != trc_lengths.max())
- for l in Id[0]:
- response.results_list[l][k].trace.data = num.concatenate(
+ for i_l in Id[0]:
+ response.results_list[i_l][i_k].trace.data = num.concatenate(
(
- response.results_list[l][k].trace.data,
- num.zeros(trc_lengths.max() - trc_lengths[l]),
+ response.results_list[i_l][i_k].trace.data,
+ num.zeros(trc_lengths.max() - trc_lengths[i_l]),
)
)
# calculate numerical partial derivative for
# each source and target
- sensitivity_param_list[par_count][k] = sensitivity_param_list[
+ sensitivity_param_list[par_count][i_k] = sensitivity_param_list[
par_count
- ][k] + (
- -response.results_list[0][k].trace.data
- + 8 * response.results_list[1][k].trace.data
- - 8 * response.results_list[2][k].trace.data
- + response.results_list[3][k].trace.data
- ) / (
- 12 * h[par_count]
- )
+ ][i_k] + (
+ -response.results_list[0][i_k].trace.data
+ + 8 * response.results_list[1][i_k].trace.data
+ - 8 * response.results_list[2][i_k].trace.data
+ + response.results_list[3][i_k].trace.data
+ ) / (12 * h[par_count])
par_count = par_count + 1
@@ -552,7 +543,7 @@ def model_prediction_sensitivity(engine, *args, **kwargs):
par_count = 0
for param in source_params:
for k in range(len(request.targets)):
- sensitivity_param_trcs[par_count][k] = trace.Trace(
+ sensitivity_param_trcs[par_count][i_k] = trace.Trace(
network=request.targets[k].codes[0],
station=request.targets[k].codes[1],
ydata=sensitivity_param_list[par_count][k],
@@ -857,15 +848,16 @@ def non_toeplitz_covariance_2d(coords, data, max_dist_perc):
return toeplitz * stds[:, num.newaxis] * stds[num.newaxis, :]
-def init_proposal_covariance(bij, vars, model, pop_size=1000):
+def init_proposal_covariance(bij, population):
"""
Create initial proposal covariance matrix based on random samples
from the solution space.
"""
- population_array = num.zeros((pop_size, bij.ordering.size))
- for i in range(pop_size):
- point = Point({v.name: v.random() for v in vars}, model=model)
- population_array[i, :] = bij.map(point)
+ test_point = population[0]
+ q = bij.map(test_point)
+ population_array = num.zeros((len(population), q.data.size))
+ for i, point in enumerate(population):
+ population_array[i, :] = bij.map(point).data
return num.diag(population_array.var(0))
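The proposal covariance is now estimated from an existing population of points rather than from freshly drawn random samples. A sketch with a stand-in bijection; the real pymc bijection returns an object whose .data attribute holds the flattened array:

    import numpy as num

    class IdentityBijection:
        # stand-in for the pymc DictToArray bijection used above
        def map(self, point):
            return num.concatenate([num.atleast_1d(v) for v in point.values()])

    population = [
        {"depth": num.array([3.0]), "east_shift": num.array([1.0, -2.0])},
        {"depth": num.array([5.0]), "east_shift": num.array([0.5, -1.0])},
    ]

    bij = IdentityBijection()
    q = bij.map(population[0])
    population_array = num.zeros((len(population), q.size))
    for i, point in enumerate(population):
        population_array[i, :] = bij.map(point)

    # diagonal proposal covariance from per-dimension variances
    print(num.diag(population_array.var(0)))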
@@ -890,16 +882,19 @@ def calc_sample_covariance(buffer, lij, bij, beta):
"""
n_points = len(buffer)
- population_array = num.zeros((n_points, bij.ordering.size))
- for i, (lpoint, _) in enumerate(buffer):
- point = lij.l2d(lpoint)
- population_array[i, :] = bij.map(point)
+ point = lij.l2d(buffer[0][0])
+ point_array = bij.map(point).data
like_idx = lij.ordering["like"].list_ind
weights = num.array([lpoint[like_idx] for lpoint, _ in buffer])
temp_weights = num.exp((weights - weights.max())).ravel()
norm_weights = temp_weights / num.sum(temp_weights)
+ population_array = num.zeros((n_points, point_array.size))
+ for i, (lpoint, _) in enumerate(buffer):
+ point = lij.l2d(lpoint)
+ population_array[i, :] = bij.map(point).data
+
cov = num.cov(population_array, aweights=norm_weights, bias=False, rowvar=0)
cov = ensure_cov_psd(cov)
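The weights above are a numerically stable softmax of the log-likelihood values, passed to num.cov as analytic weights. A small self-contained illustration:

    import numpy as num

    loglikes = num.array([-1200.0, -1195.0, -1190.0])
    temp_weights = num.exp(loglikes - loglikes.max())   # subtract max for stability
    norm_weights = temp_weights / temp_weights.sum()

    samples = num.random.default_rng(0).normal(size=(3, 2))
    cov = num.cov(samples, aweights=norm_weights, bias=False, rowvar=0)
    print(norm_weights, cov.shape)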
diff --git a/beat/defaults.py b/beat/defaults.py
new file mode 100644
index 00000000..9f505379
--- /dev/null
+++ b/beat/defaults.py
@@ -0,0 +1,318 @@
+import logging
+import os
+
+import numpy as num
+from pyrocko import util
+from pyrocko.config import expand
+from pyrocko.guts import Dict, Float, Object, String, Tuple, dump, load
+
+logger = logging.getLogger("pyrocko.config")
+
+guts_prefix = "pf"
+
+SQRT2 = num.sqrt(2)
+
+default_seis_std = 1.0e-6
+default_geo_std = 1.0e-3
+default_decimation_factors = {"polarity": 1, "geodetic": 4, "seismic": 2}
+
+beat_dir_tmpl = os.environ.get("BEAT_DIR", os.path.expanduser("~/.beat"))
+
+
+class Bounds(Object):
+ default_bounds = Tuple.T(2, Float.T(), default=(0, 1))
+ physical_bounds = Tuple.T(2, Float.T(), default=(0, 1))
+ unit = String.T(default="$[m]$")
+
+
+class ParameterDefaults(Object):
+ parameters = Dict.T(String.T(), Bounds.T())
+
+ def __getitem__(self, k):
+ if k not in self.parameters.keys():
+ raise KeyError(k)
+ return self.parameters[k]
+
+
+sf_force = (0, 1e10)
+moffdiag = (-1.0, 1.0)
+mdiag = (-SQRT2, SQRT2)
+
+
+# Bounds and Units for all parameters
+parameter_info = {
+ "east_shift": Bounds(
+ physical_bounds=(-500.0, 500.0), default_bounds=(-10.0, 10.0), unit="$[km]$"
+ ),
+ "north_shift": Bounds(
+ physical_bounds=(-500.0, 500.0), default_bounds=(-10.0, 10.0), unit="$[km]$"
+ ),
+ "depth": Bounds(
+ physical_bounds=(0.0, 1000.0), default_bounds=(0.0, 5.0), unit="$[km]$"
+ ),
+ "strike": Bounds(
+ physical_bounds=(-90.0, 420.0), default_bounds=(0, 180.0), unit=r"$[^\circ]$"
+ ),
+ "strike1": Bounds(
+ physical_bounds=(-90.0, 420.0), default_bounds=(0, 180.0), unit=r"$[^\circ]$"
+ ),
+ "strike2": Bounds(
+ physical_bounds=(-90.0, 420.0), default_bounds=(0, 180.0), unit=r"$[^\circ]$"
+ ),
+ "dip": Bounds(
+ physical_bounds=(-45.0, 135.0), default_bounds=(45.0, 90.0), unit=r"$[^\circ]$"
+ ),
+ "dip1": Bounds(
+ physical_bounds=(-45.0, 135.0), default_bounds=(45.0, 90.0), unit=r"$[^\circ]$"
+ ),
+ "dip2": Bounds(
+ physical_bounds=(-45.0, 135.0), default_bounds=(45.0, 90.0), unit=r"$[^\circ]$"
+ ),
+ "rake": Bounds(
+ physical_bounds=(-180.0, 270.0),
+ default_bounds=(-90.0, 90.0),
+ unit=r"$[^\circ]$",
+ ),
+ "rake1": Bounds(
+ physical_bounds=(-180.0, 270.0),
+ default_bounds=(-90.0, 90.0),
+ unit=r"$[^\circ]$",
+ ),
+ "rake2": Bounds(
+ physical_bounds=(-180.0, 270.0),
+ default_bounds=(-90.0, 90.0),
+ unit=r"$[^\circ]$",
+ ),
+ "mix": Bounds(physical_bounds=(0, 1), default_bounds=(0, 1), unit=""),
+ "volume_change": Bounds(
+ physical_bounds=(-1e12, 1e12), default_bounds=(1e8, 1e10), unit="$[m^3]$"
+ ),
+ "diameter": Bounds(
+ physical_bounds=(0.0, 100.0), default_bounds=(5.0, 10.0), unit="$[km]$"
+ ),
+ "slip": Bounds(
+ physical_bounds=(0.0, 150.0), default_bounds=(0.1, 8.0), unit="$[m]$"
+ ),
+ "opening_fraction": Bounds(
+ physical_bounds=moffdiag, default_bounds=(0.0, 0.0), unit=""
+ ),
+ "azimuth": Bounds(
+ physical_bounds=(0, 360), default_bounds=(0, 180), unit=r"$[^\circ]$"
+ ),
+ "amplitude": Bounds(
+ physical_bounds=(1.0, 10e25), default_bounds=(1e10, 1e20), unit="$[Nm]$"
+ ),
+ "locking_depth": Bounds(
+ physical_bounds=(0.1, 100.0), default_bounds=(1.0, 10.0), unit="$[km]$"
+ ),
+ "nucleation_dip": Bounds(
+ physical_bounds=(0.0, num.inf), default_bounds=(0.0, 7.0), unit="$[km]$"
+ ),
+ "nucleation_strike": Bounds(
+ physical_bounds=(0.0, num.inf), default_bounds=(0.0, 10.0), unit="$[km]$"
+ ),
+ "nucleation_x": Bounds(physical_bounds=moffdiag, default_bounds=moffdiag, unit=""),
+ "nucleation_y": Bounds(physical_bounds=moffdiag, default_bounds=moffdiag, unit=""),
+ "time_shift": Bounds(
+ physical_bounds=(-20.0, 20.0), default_bounds=(-5.0, 5.0), unit="$[s]$"
+ ),
+ "coupling": Bounds(physical_bounds=(0, 100), default_bounds=(0, 1), unit="[$\%$]"),
+ "uperp": Bounds(
+ physical_bounds=(-150.0, 150.0), default_bounds=(-0.3, 4.0), unit="$[m]$"
+ ),
+ "uparr": Bounds(
+ physical_bounds=(-1.0, 150.0), default_bounds=(-0.05, 6.0), unit="$[m]$"
+ ),
+ "utens": Bounds(
+ physical_bounds=(-150.0, 150.0), default_bounds=(0.0, 0.0), unit="$[m]$"
+ ),
+ "durations": Bounds(
+ physical_bounds=(0.0, 600.0), default_bounds=(0.5, 29.5), unit="$[s]$"
+ ),
+ "velocities": Bounds(
+ physical_bounds=(0.0, 20.0), default_bounds=(0.5, 4.2), unit="$[km/s]$"
+ ),
+ "fn": Bounds(
+ physical_bounds=(-1e20, 1e20), default_bounds=(-1e20, 1e20), unit="$[N]$"
+ ),
+ "fe": Bounds(
+ physical_bounds=(-1e20, 1e20), default_bounds=(-1e20, 1e20), unit="$[N]$"
+ ),
+ "fd": Bounds(
+ physical_bounds=(-1e20, 1e20), default_bounds=(-1e20, 1e20), unit="$[N]$"
+ ),
+ "mnn": Bounds(
+ physical_bounds=(-SQRT2, SQRT2), default_bounds=(-SQRT2, SQRT2), unit="$[Nm]$"
+ ),
+ "mee": Bounds(
+ physical_bounds=(-SQRT2, SQRT2), default_bounds=(-SQRT2, SQRT2), unit="$[Nm]$"
+ ),
+ "mdd": Bounds(
+ physical_bounds=(-SQRT2, SQRT2), default_bounds=(-SQRT2, SQRT2), unit="$[Nm]$"
+ ),
+ "mne": Bounds(physical_bounds=moffdiag, default_bounds=moffdiag, unit="$[Nm]$"),
+ "mnd": Bounds(physical_bounds=moffdiag, default_bounds=moffdiag, unit="$[Nm]$"),
+ "med": Bounds(physical_bounds=moffdiag, default_bounds=moffdiag, unit="$[Nm]$"),
+ "magnitude": Bounds(
+ physical_bounds=(-5.0, 10.0), default_bounds=(4.0, 7.0), unit=""
+ ),
+ "eps_xx": Bounds(
+ physical_bounds=(-num.inf, num.inf), default_bounds=(0, 1), unit=""
+ ),
+ "eps_yy": Bounds(
+ physical_bounds=(-num.inf, num.inf), default_bounds=(0, 1), unit=""
+ ),
+ "eps_xy": Bounds(
+ physical_bounds=(-num.inf, num.inf), default_bounds=(0, 1), unit=""
+ ),
+ "rotation": Bounds(
+ physical_bounds=(-num.inf, num.inf),
+ default_bounds=(-200.0, 200.0),
+ unit="$[rad]$",
+ ),
+ "pole_lat": Bounds(
+ physical_bounds=(-90.0, 90.0), default_bounds=(0, 1), unit=r"$[^\circ]$"
+ ),
+ "pole_lon": Bounds(
+ physical_bounds=(-180.0, 180.0), default_bounds=(0, 1), unit=r"$[^\circ]$"
+ ),
+ "omega": Bounds(
+ physical_bounds=(-10.0, 10.0), default_bounds=(0.5, 0.6), unit=r"$[^\circ/myr]$"
+ ),
+ "w": Bounds(
+ physical_bounds=(-3.0 / 8.0 * num.pi, 3.0 / 8.0 * num.pi),
+ default_bounds=(-3.0 / 8.0 * num.pi, 3.0 / 8.0 * num.pi),
+ unit="$[rad]$",
+ ),
+ "v": Bounds(
+ physical_bounds=(-1.0 / 3, 1.0 / 3),
+ default_bounds=(-1.0 / 3, 1.0 / 3),
+ unit="$[rad]$",
+ ),
+ "kappa": Bounds(
+ physical_bounds=(0.0, 2 * num.pi),
+ default_bounds=(0.0, 2 * num.pi),
+ unit=r"$[^\circ]$",
+ ),
+ "sigma": Bounds(
+ physical_bounds=(-num.pi / 2.0, num.pi / 2.0),
+ default_bounds=(-num.pi / 2.0, num.pi / 2.0),
+ unit=r"$[^\circ]$",
+ ),
+ "h": Bounds(
+ physical_bounds=(0.0, 1.0), default_bounds=(0.0, 1.0), unit=r"$[^\circ]$"
+ ),
+ "length": Bounds(
+ physical_bounds=(0.0, 7000.0), default_bounds=(5.0, 30.0), unit="$[km]$"
+ ),
+ "width": Bounds(
+ physical_bounds=(0.0, 500.0), default_bounds=(5.0, 20.0), unit="$[km]$"
+ ),
+ "time": Bounds(
+ physical_bounds=(-200.0, 200.0), default_bounds=(-5.0, 5.0), unit="$[s]$"
+ ),
+ "delta_time": Bounds(
+ physical_bounds=(0.0, 100.0), default_bounds=(0.0, 10.0), unit="$[s]$"
+ ),
+ "depth_bottom": Bounds(
+ physical_bounds=(0.0, 300.0), default_bounds=(0.0, 10.0), unit="$[km]$"
+ ),
+ "distance": Bounds(
+ physical_bounds=(0.0, 300.0), default_bounds=(0.0, 10.0), unit="$[km]$"
+ ),
+ "duration": Bounds(
+ physical_bounds=(0.0, 600.0), default_bounds=(1.0, 30.0), unit="$[s]$"
+ ),
+ "peak_ratio": Bounds(
+ physical_bounds=(0.0, 1.0), default_bounds=(0.0, 1.0), unit=""
+ ),
+ "hypers": Bounds(physical_bounds=(-4.0, 10.0), default_bounds=(-2.0, 6.0), unit=""),
+ "ramp": Bounds(
+ physical_bounds=(-0.005, 0.005), default_bounds=(-0.005, 0.005), unit="$[rad]$"
+ ),
+ "offset": Bounds(
+ physical_bounds=(-0.05, 0.05), default_bounds=(-0.05, 0.05), unit="$[m]$"
+ ),
+ "lat": Bounds(
+ physical_bounds=(30.0, 30.5), default_bounds=(30.0, 30.5), unit=r"$[^\circ]$"
+ ),
+ "lon": Bounds(
+ physical_bounds=(30.0, 30.5), default_bounds=(30.0, 30.5), unit=r"$[^\circ]$"
+ ),
+ "traction": Bounds(
+ physical_bounds=(0, 1000), default_bounds=(0, 50), unit="$[MPa]$"
+ ),
+ "strike_traction": Bounds(
+ physical_bounds=(-15000, 15000), default_bounds=(-50, 50), unit="$[MPa]$"
+ ),
+ "dip_traction": Bounds(
+ physical_bounds=(-15000, 15000), default_bounds=(-50, 50), unit="$[MPa]$"
+ ),
+ "normal_traction": Bounds(
+ physical_bounds=(-15000, 15000), default_bounds=(-50, 50), unit="$[MPa]$"
+ ),
+ "a_half_axis": Bounds(
+ physical_bounds=(0.01, 100), default_bounds=(0.01, 10), unit="$[km]$"
+ ),
+ "b_half_axis": Bounds(
+ physical_bounds=(0.01, 100), default_bounds=(0.01, 10), unit="$[km]$"
+ ),
+ "a_half_axis_bottom": Bounds(
+ physical_bounds=(0.01, 100), default_bounds=(0.01, 10), unit="$[km]$"
+ ),
+ "b_half_axis_bottom": Bounds(
+ physical_bounds=(0.01, 100), default_bounds=(0.01, 10), unit="$[km]$"
+ ),
+ "plunge": Bounds(
+ physical_bounds=(0, 90), default_bounds=(0, 20), unit=r"$[^\circ]$"
+ ),
+ "delta_east_shift_bottom": Bounds(
+ physical_bounds=(-500, 500), default_bounds=(-10, 10), unit="$[km]$"
+ ),
+ "delta_north_shift_bottom": Bounds(
+ physical_bounds=(-500, 500), default_bounds=(-10, 10), unit="$[km]$"
+ ),
+ "curv_amplitude_bottom": Bounds(
+ physical_bounds=moffdiag, default_bounds=moffdiag, unit=""
+ ),
+ "curv_location_bottom": Bounds(
+ physical_bounds=(0.0, 1.0), default_bounds=(0.0, 1.0), unit=""
+ ),
+ "bend_location": Bounds(
+ physical_bounds=(0.0, 1.0), default_bounds=(0.0, 1.0), unit=""
+ ),
+ "bend_amplitude": Bounds(
+ physical_bounds=moffdiag, default_bounds=moffdiag, unit=""
+ ),
+ "like": Bounds(physical_bounds=(-num.inf, num.inf), default_bounds=(0, 1), unit=""),
+}
+
+
+def hypername(varname):
+ return varname if varname in parameter_info.keys() else "hypers"
+
+
+def make_path_tmpl(name="defaults"):
+ return os.path.join(beat_dir_tmpl, f"{name}.pf")
+
+
+def init_parameter_defaults():
+ defaults = ParameterDefaults()
+ for parameter_name, bounds in parameter_info.items():
+ defaults.parameters[parameter_name] = bounds
+ return defaults
+
+
+def get_defaults(force=True):
+ defaults_path = expand(make_path_tmpl())
+ if not os.path.exists(defaults_path) or force:
+ defaults = init_parameter_defaults()
+ util.ensuredirs(defaults_path)
+ dump(defaults, filename=defaults_path)
+ else:
+ defaults = load(filename=defaults_path)
+ return defaults
+
+
+defaults = get_defaults()
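Typical consumption of the new registry elsewhere in the code base, assuming the package from this patch is importable:

    from beat.defaults import defaults

    bounds = defaults["depth"]
    lower, upper = bounds.default_bounds
    pb_lower, pb_upper = bounds.physical_bounds
    print(f"depth prior {lower}-{upper} {bounds.unit}, physical {pb_lower}-{pb_upper}")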
diff --git a/beat/fast_sweeping/fast_sweep.py b/beat/fast_sweeping/fast_sweep.py
index 1ecb959c..c5741cca 100644
--- a/beat/fast_sweeping/fast_sweep.py
+++ b/beat/fast_sweeping/fast_sweep.py
@@ -11,12 +11,12 @@
S 0025-5718(04)01678-3
"""
-import numpy as num
-import theano
-import theano.tensor as tt
-from theano.ifelse import ifelse
-
import fast_sweep_ext
+import numpy as num
+import pytensor
+import pytensor.tensor as tt
+from pytensor.ifelse import ifelse
+from pytensor.scan.utils import until
km = 1000.0
@@ -230,10 +230,10 @@ def upwind(
return StartTimes
-def get_rupture_times_theano(slownesses, patch_size, nuc_x, nuc_y):
+def get_rupture_times_pytensor(slownesses, patch_size, nuc_x, nuc_y):
"""
Does the same calculation as get_rupture_times_numpy
- just with symbolic variable input and output for theano graph
+ just with symbolic variable input and output for pytensor graph
implementation optimization.
"""
[step_dip_max, step_str_max] = slownesses.shape
@@ -241,8 +241,8 @@ def get_rupture_times_theano(slownesses, patch_size, nuc_x, nuc_y):
StartTimes = tt.set_subtensor(StartTimes[nuc_y, nuc_x], 0)
# Stopping check var
- epsilon = theano.shared(0.1)
- err_val = theano.shared(1e6)
+ epsilon = pytensor.shared(0.1)
+ err_val = pytensor.shared(1e6)
# Iterator matrixes
dip1 = tt.repeat(tt.arange(step_dip_max), step_str_max)
@@ -263,7 +263,7 @@ def get_rupture_times_theano(slownesses, patch_size, nuc_x, nuc_y):
### Upwind scheme ###
def upwind(dip_ind, str_ind, StartTimes, slownesses, patch_size):
[n_patch_dip, n_patch_str] = slownesses.shape
- zero = theano.shared(0)
+ zero = pytensor.shared(0)
s1 = str_ind - 1
d1 = dip_ind - 1
s2 = str_ind + 1
@@ -293,9 +293,7 @@ def upwind(dip_ind, str_ind, StartTimes, slownesses, patch_size):
# xnew = |
# |0.5 * [ a+b+sqrt( 2*f^2*h^2 - (a-b)^2 ) ], |a-b| < f*h
start_new = ifelse(
- tt.le(
- slownesses[dip_ind, str_ind] * patch_size, tt.abs_(ST_xmin - ST_ymin)
- ),
+ tt.le(slownesses[dip_ind, str_ind] * patch_size, tt.abs(ST_xmin - ST_ymin)),
tt.min((ST_xmin, ST_ymin)) + slownesses[dip_ind, str_ind] * patch_size,
(
ST_xmin
@@ -319,7 +317,7 @@ def upwind(dip_ind, str_ind, StartTimes, slownesses, patch_size):
)
def loop_upwind(StartTimes, PreviousTimes, err_val, iteration, epsilon):
- [results, updates] = theano.scan(
+ [results, updates] = pytensor.scan(
fn=upwind,
sequences=[DIP, STR],
outputs_info=[StartTimes],
@@ -332,13 +330,13 @@ def loop_upwind(StartTimes, PreviousTimes, err_val, iteration, epsilon):
PreviousTimes = StartTimes.copy()
return (
(StartTimes, PreviousTimes, err_val, iteration + 1),
- theano.scan_module.until(err_val < epsilon),
+ until(err_val < epsilon),
)
# while loop until err < epsilon
- iteration = theano.shared(0)
+ iteration = pytensor.shared(0)
PreviousTimes = StartTimes.copy()
- ([result, PreviousTimes, errs, Iteration], updates) = theano.scan(
+ ([result, PreviousTimes, errs, Iteration], updates) = pytensor.scan(
fn=loop_upwind,
outputs_info=[StartTimes, PreviousTimes, err_val, iteration],
non_sequences=[epsilon],
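The scan/until pattern replaces theano.scan_module.until. A minimal pytensor while-loop sketch, unrelated to the fast-sweeping math itself, only demonstrating the stopping condition:

    import pytensor
    import pytensor.tensor as tt
    from pytensor.scan.utils import until

    def step(acc):
        acc_new = acc + 1
        # stop scanning once the condition becomes true
        return acc_new, until(tt.ge(acc_new, 5))

    results, updates = pytensor.scan(
        fn=step, outputs_info=[tt.constant(0, dtype="int64")], n_steps=100
    )
    fn = pytensor.function([], results[-1], updates=updates)
    print(fn())   # 5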
diff --git a/beat/ffi/base.py b/beat/ffi/base.py
index 3c01cc33..1ae2403b 100644
--- a/beat/ffi/base.py
+++ b/beat/ffi/base.py
@@ -3,11 +3,11 @@
from multiprocessing import RawArray
import numpy as num
-import theano.tensor as tt
+import pytensor.tensor as tt
from pyrocko.guts import load
from pyrocko.trace import Trace
-from theano import config as tconfig
-from theano import shared
+from pytensor import config as tconfig
+from pytensor import shared
from beat import heart, parallel
from beat.config import GeodeticGFLibraryConfig, SeismicGFLibraryConfig
@@ -17,7 +17,7 @@
gf_dtype = "float64"
-backends = {"numpy": num, "theano": tt}
+backends = {"numpy": num, "pytensor": tt}
def get_backend(backend):
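The backends dict lets the same stacking code run either on plain numpy arrays or on symbolic pytensor tensors. A toy sketch; round_positions is illustrative, not a BEAT function:

    import numpy as num
    import pytensor.tensor as tt

    backends = {"numpy": num, "pytensor": tt}

    def round_positions(positions, backend="numpy"):
        # both numpy and pytensor.tensor expose a round() function
        return backends[backend].round(positions)

    print(round_positions(num.array([0.4, 1.6])))        # eager numpy result
    print(round_positions(tt.vector("x"), "pytensor"))   # symbolic graph node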
@@ -65,7 +65,6 @@ class GFLibrary(object):
"""
def __init__(self, config):
-
self.config = config
self._gfmatrix = None
self._sgfmatrix = None
@@ -74,10 +73,10 @@ def __init__(self, config):
self._stack_switch = {}
def _check_mode_init(self, mode):
- if mode == "theano":
+ if mode == "pytensor":
if self._sgfmatrix is None:
raise GFLibraryError(
- 'To use "stack_all" theano stacking optimization mode'
+ 'To use "stack_all" pytensor stacking optimization mode'
" has to be initialised!"
)
@@ -108,11 +107,10 @@ def patchidxs(self):
def sw_patchidxs(self):
if self._mode == "numpy":
return self.patchidxs
- elif self._mode == "theano":
+ elif self._mode == "pytensor":
return self.spatchidxs
def save_config(self, outdir="", filename=None):
-
filename = filename or "%s" % self.filename
outpath = os.path.join(outdir, filename + ".yaml")
logger.debug("Dumping GF config to %s" % outpath)
@@ -122,7 +120,6 @@ def save_config(self, outdir="", filename=None):
self.config.dump(filename=outpath, header=header)
def load_config(self, filename):
-
try:
config = load(filename=filename)
except IOError:
@@ -134,7 +131,7 @@ def set_stack_mode(self, mode="numpy"):
"""
         Sets mode on which backend the stacking is working.
Dependent on that the input to the stack function has to be
- either of :class:`numpy.ndarray` or of :class:`theano.tensor.Tensor`
+ either of :class:`numpy.ndarray` or of :class:`pytensor.tensor.Tensor`
Parameters
----------
@@ -152,7 +149,7 @@ def set_stack_mode(self, mode="numpy"):
def get_stack_mode(self):
"""
- Returns string of stack mode either "numpy" or "theano"
+ Returns string of stack mode either "numpy" or "pytensor"
"""
return self._mode
@@ -202,7 +199,6 @@ class GeodeticGFLibrary(GFLibrary):
"""
def __init__(self, config=GeodeticGFLibraryConfig()):
-
super(GeodeticGFLibrary, self).__init__(config=config)
self._sgfmatrix = None
@@ -237,7 +233,6 @@ def save(self, outdir="", filename=None):
self.save_config(outdir=outdir, filename=filename)
def setup(self, npatches, nsamples, allocate=False):
-
self.dimensions = (npatches, nsamples)
if allocate:
@@ -247,7 +242,6 @@ def setup(self, npatches, nsamples, allocate=False):
self.set_stack_mode(mode="numpy")
def init_optimization(self):
-
logger.info("Setting %s GF Library to optimization mode." % self.filename)
self._sgfmatrix = shared(
self._gfmatrix.astype(tconfig.floatX), name=self.filename, borrow=True
@@ -256,9 +250,9 @@ def init_optimization(self):
self.spatchidxs = shared(self.patchidxs, name="geo_patchidx_vec", borrow=True)
- self._stack_switch = {"numpy": self._gfmatrix, "theano": self._sgfmatrix}
+ self._stack_switch = {"numpy": self._gfmatrix, "pytensor": self._sgfmatrix}
- self.set_stack_mode(mode="theano")
+ self.set_stack_mode(mode="pytensor")
def put(self, entries, patchidx):
"""
@@ -298,7 +292,7 @@ def put(self, entries, patchidx):
def stack_all(self, slips):
"""
Stack all patches for all targets at once.
- In theano for efficient optimization.
+ In pytensor for efficient optimization.
Parameters
----------
@@ -337,7 +331,6 @@ class SeismicGFLibrary(GFLibrary):
"""
def __init__(self, config=SeismicGFLibraryConfig()):
-
super(SeismicGFLibrary, self).__init__(config=config)
self._sgfmatrix = None
@@ -382,7 +375,6 @@ def save(self, outdir="", filename=None):
def setup(
self, ntargets, npatches, ndurations, nstarttimes, nsamples, allocate=False
):
-
self.dimensions = (ntargets, npatches, ndurations, nstarttimes, nsamples)
if allocate:
@@ -393,7 +385,6 @@ def setup(
self.set_stack_mode(mode="numpy")
def init_optimization(self):
-
logger.info("Setting %s GF Library to optimization mode." % self.filename)
self._sgfmatrix = shared(
self._gfmatrix.astype(tconfig.floatX), name=self.filename, borrow=True
@@ -408,9 +399,9 @@ def init_optimization(self):
self.spatchidxs = shared(self.patchidxs, name="seis_patchidx_vec", borrow=True)
- self._stack_switch = {"numpy": self._gfmatrix, "theano": self._sgfmatrix}
+ self._stack_switch = {"numpy": self._gfmatrix, "pytensor": self._sgfmatrix}
- self.set_stack_mode(mode="theano")
+ self.set_stack_mode(mode="pytensor")
def set_patch_time(self, targetidx, tmin):
"""
@@ -494,18 +485,18 @@ def trace_tmin(self, targetidx):
def starttimes2idxs(self, starttimes, interpolation="nearest_neighbor"):
"""
Transforms starttimes into indexes to the GFLibrary.
- Depending on the stacking mode of the GFLibrary theano or numpy
+ Depending on the stacking mode of the GFLibrary pytensor or numpy
is used.
Parameters
----------
- starttimes [s]: :class:`numpy.ndarray` or :class:`theano.tensor.Tensor`
+ starttimes [s]: :class:`numpy.ndarray` or :class:`pytensor.tensor.Tensor`
of the rupturing of the patch, float
Returns
-------
starttimeidxs, starttimes : :class:`numpy.ndarray` or
- :class:`theano.tensor.Tensor`, int16
+ :class:`pytensor.tensor.Tensor`, int16
(output depends on interpolation scheme,
if multilinear interpolation factors are returned as well)
"""
@@ -543,18 +534,18 @@ def idxs2starttimes(self, idxs):
def durations2idxs(self, durations, interpolation="nearest_neighbor"):
"""
Transforms durations into indexes to the GFLibrary.
- Depending on the stacking mode of the GFLibrary theano or numpy
+ Depending on the stacking mode of the GFLibrary pytensor or numpy
is used.
Parameters
----------
- durations [s] : :class:`numpy.ndarray` or :class:`theano.tensor.Tensor`
+ durations [s] : :class:`numpy.ndarray` or :class:`pytensor.tensor.Tensor`
of the rupturing of the patch, float
Returns
-------
durationidxs, starttimes : :class:`numpy.ndarray` or
- :class:`theano.tensor.Tensor`, int16
+ :class:`pytensor.tensor.Tensor`, int16
"""
backend = get_backend(self._mode)
@@ -586,7 +577,7 @@ def stack(
):
"""
Stack selected traces from the GF Library of specified
- target, patch, durations and starttimes. Numpy or theano dependent
+ target, patch, durations and starttimes. Numpy or pytensor dependent
on the stack_mode
Parameters
@@ -594,7 +585,7 @@ def stack(
Returns
-------
- :class:`numpy.ndarray` or of :class:`theano.tensor.Tensor` dependent
+ :class:`numpy.ndarray` or of :class:`pytensor.tensor.Tensor` dependent
on stack mode
"""
durationidxs, rt_factors = self.durations2idxs(
@@ -623,11 +614,11 @@ def stack_all(
):
"""
Stack all patches for all targets at once.
- In theano for efficient optimization.
+ In pytensor for efficient optimization.
Parameters
----------
- starttimes: numpy or theano tensor
+ starttimes: numpy or pytensor tensor
size (ntargets, npatches) to be able to account for time-shifts!
Returns
@@ -635,7 +626,6 @@ def stack_all(
matrix : size (ntargets, nsamples)
option : tensor.batched_dot(sd.dimshuffle((1,0,2)), u).sum(axis=0)
"""
-
if targetidxs is None:
raise ValueError("Target indexes have to be defined!")
@@ -656,7 +646,6 @@ def stack_all(
)
if interpolation == "nearest_neighbor":
-
cd = (
self._stack_switch[self._mode][
targetidxs, patchidxs, durationidxs, starttimeidxs, :
@@ -670,7 +659,6 @@ def stack_all(
)
elif interpolation == "multilinear":
-
d_st_ceil_rt_ceil = self._stack_switch[self._mode][
targetidxs, patchidxs, durationidxs, starttimeidxs, :
].reshape((self.ntargets, npatches, self.nsamples))
@@ -713,7 +701,7 @@ def stack_all(
"Interpolation scheme %s not implemented!" % interpolation
)
- if self._mode == "theano":
+ if self._mode == "pytensor":
return tt.batched_dot(cd.dimshuffle((2, 0, 1)), cslips)
elif self._mode == "numpy":
@@ -813,10 +801,9 @@ def filename(self):
def _process_patch_geodetic(engine, gfs, targets, patch, patchidx, los_vectors, odws):
-
logger.debug("Patch Number %i", patchidx)
logger.debug("Calculating synthetics ...")
-
+    logger.debug(str(patch))
disp = heart.geo_synthetics(
engine=engine, targets=targets, sources=[patch], outmode="stacked_array"
)
@@ -1017,7 +1004,6 @@ def geo_construct_gf_linear_patches(
def _process_patch_seismic(
engine, gfs, targets, patch, patchidx, durations, starttimes
):
-
     # ensure event reference time
logger.debug("Using reference event source time ...")
patch.time = gfs.config.event.time
@@ -1033,7 +1019,6 @@ def _process_patch_seismic(
source_patches_durations.append(pcopy)
for j, target in enumerate(targets):
-
traces, _ = heart.seis_synthetics(
engine=engine,
sources=source_patches_durations,
@@ -1141,7 +1126,7 @@ def seis_construct_gf_linear(
rupture_velocities = fault.vector2subfault(
idx, velocities_prior.get_lower(fault.subfault_npatches)
)
- except (IndexError):
+ except IndexError:
raise ValueError(
"Velocities need to be of size either"
" npatches or number of fault segments"
diff --git a/beat/ffi/fault.py b/beat/ffi/fault.py
index dddca3c8..bc89487c 100644
--- a/beat/ffi/fault.py
+++ b/beat/ffi/fault.py
@@ -6,13 +6,13 @@
import numpy as num
from matplotlib import pyplot as plt
from pyrocko.gf.seismosizer import Cloneable
-from pyrocko.guts import Dict, Float, Int, List, Object, dump, load
+from pyrocko.guts import Dict, Float, Int, List, Object, dump
from pyrocko.moment_tensor import moment_to_magnitude
from pyrocko.orthodrome import latlon_to_ne_numpy, ne_to_latlon
from pyrocko.plot import mpl_papersize
from pyrocko.util import ensuredir
+from pytensor import shared
from scipy.linalg import block_diag, svd
-from theano import shared
from beat.config import (
ResolutionDiscretizationConfig,
@@ -146,7 +146,6 @@ def get_model_resolution(self):
return None
def get_subfault_key(self, index, datatype, component):
-
if datatype is not None:
self._check_datatype(datatype)
else:
@@ -162,7 +161,6 @@ def get_subfault_key(self, index, datatype, component):
return datatype + "_" + component + "_" + str(index)
def setup_subfaults(self, datatype, component, ext_sources, replace=False):
-
if len(ext_sources) != self.nsubfaults:
raise FaultGeometryError("Setup does not match fault ordering!")
@@ -205,7 +203,6 @@ def iter_subfaults(self, idxs=None, datatype=None, component=None):
yield self.get_subfault(index=i, datatype=datatype, component=component)
def get_subfault(self, index, datatype=None, component=None):
-
datatype = self._assign_datatype(datatype)
component = self._assign_component(component)
@@ -229,7 +226,6 @@ def get_all_subfaults(self, datatype=None, component=None):
return subfaults
def set_subfault_patches(self, index, patches, datatype, component, replace=False):
-
source_key = self.get_subfault_key(index, datatype, component)
if source_key not in list(self._discretized_patches.keys()) or replace:
@@ -272,9 +268,7 @@ def get_all_patches(self, datatype=None, component=None):
'geodetic' or 'seismic'
component : str
slip component to return may be %s
- """ % list2string(
- slip_directions.keys()
- )
+ """ % list2string(slip_directions.keys())
datatype = self._assign_datatype(datatype)
component = self._assign_component(component)
@@ -312,7 +306,10 @@ def get_subfault_patch_moments(
if slips is not None:
rs.update(slip=slips[i])
- pm = rs.get_moment(target=target, store=store)
+ if slips[i] != 0.0:
+ pm = rs.get_moment(target=target, store=store)
+ else:
+ pm = 0.0
moments.append(pm)
return moments
@@ -324,7 +321,6 @@ def get_moment(self, point=None, store=None, target=None, datatype="geodetic"):
moments = []
for index in range(self.nsubfaults):
slips = self.get_total_slip(index, point)
-
sf_moments = self.get_subfault_patch_moments(
index=index, slips=slips, store=store, target=target, datatype=datatype
)
@@ -336,9 +332,13 @@ def get_magnitude(self, point=None, store=None, target=None, datatype="geodetic"
"""
Get total moment magnitude after Hanks and Kanamori 1979
"""
- return moment_to_magnitude(
- self.get_moment(point=point, store=store, target=target, datatype=datatype)
+ moment = self.get_moment(
+ point=point, store=store, target=target, datatype=datatype
)
+ if moment:
+ return moment_to_magnitude(moment)
+ else:
+ return moment
def get_total_slip(self, index=None, point={}, components=None):
"""
@@ -390,7 +390,6 @@ def get_subfault_patch_stfs(
for i, rs in enumerate(
self.get_subfault_patches(index=idx, datatype=datatype)
):
-
if starttimes.size != self.subfault_npatches[idx]:
starttimes_idx = self.vector2subfault(index=idx, vector=starttimes)
durations_idx = self.vector2subfault(index=idx, vector=durations)
@@ -409,7 +408,6 @@ def get_subfault_patch_stfs(
return patch_times, patch_amplitudes
def get_subfault_moment_rate_function(self, index, point, target, store):
-
deltat = store.config.deltat
slips = self.get_total_slip(index, point, components=["uparr", "uperp"])
starttimes = self.point2starttimes(point, index=index).ravel()
@@ -445,7 +443,6 @@ def get_subfault_moment_rate_function(self, index, point, target, store):
return mrf_rates, mrf_times
def get_moment_rate_function(self, index, point, target, store):
-
if isinstance(index, list):
pass
else:
@@ -488,14 +485,17 @@ def duplicate_property(array):
else:
raise TypeError("Only 1-2d data supported!")
- def patches2vertices(patches):
+ def patches2vertices(patches, keep_lastline=False):
verts = []
for patch in patches:
patch.anchor = "top"
xyz = patch.outline()
latlon = num.ones((5, 2)) * num.array([patch.lat, patch.lon])
patchverts = num.hstack((latlon, xyz))
- verts.append(patchverts[:-1, :]) # last vertex double
+ if keep_lastline:
+ verts.append(patchverts)
+ else:
+ verts.append(patchverts[:-1, :])
return num.vstack(verts)
@@ -566,7 +566,7 @@ def patches2vertices(patches):
outlines = []
for sf in self.iter_subfaults():
- outlines.append(patches2vertices([sf]))
+ outlines.append(patches2vertices([sf], keep_lastline=True))
faces1 = num.arange(ncorners * self.npatches, dtype="int64").reshape(
self.npatches, ncorners
@@ -632,7 +632,6 @@ def point2starttimes(self, point, index=0):
)
def var_from_point(self, index=None, point={}, varname=None):
-
try:
rv = point[varname]
except KeyError:
@@ -680,6 +679,7 @@ def point2sources(self, point, events=[]):
sf_patches = self.get_subfault_patches(
index, datatype=datatype, component=component
)
+ n_sf_patches = len(sf_patches)
ucomps = {}
for comp in slip_directions.keys():
@@ -687,7 +687,9 @@ def point2sources(self, point, events=[]):
slips = self.get_total_slip(index, point)
rakes = num.arctan2(-ucomps["uperp"], ucomps["uparr"]) * r2d + sf.rake
- opening_fractions = ucomps["utens"] / slips
+ opening_fractions = num.divide(
+ ucomps["utens"], slips, out=num.zeros_like(slips), where=slips != 0
+ )
sf_point = {
"slip": slips,
@@ -707,8 +709,8 @@ def point2sources(self, point, events=[]):
except KeyError:
pass
- patch_points = split_point(sf_point)
- assert len(patch_points) == len(sf_patches)
+ patch_points = split_point(sf_point, n_sources_total=n_sf_patches)
+ assert len(patch_points) == n_sf_patches
for patch, patch_point in zip(sf_patches, patch_points):
update_source(patch, **patch_point)
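num.divide with out and where avoids NaNs for patches whose total slip is zero; those patches simply get an opening fraction of 0:

    import numpy as num

    utens = num.array([0.0, 0.5, 1.0])
    slips = num.array([0.0, 1.0, 2.0])
    opening_fractions = num.divide(
        utens, slips, out=num.zeros_like(slips), where=slips != 0
    )
    print(opening_fractions)   # [0.  0.5 0.5]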
@@ -876,7 +878,7 @@ def fault_locations2idxs(
positions_strike : :class:`numpy.NdArray` float
of positions in strike direction of the fault [km]
backend : str
- which implementation backend to use [numpy/theano]
+ which implementation backend to use [numpy/pytensor]
"""
backend = get_backend(backend)
dipidx = positions2idxs(
@@ -941,7 +943,6 @@ def is_discretized(self):
return True if self.npatches else False
def get_derived_parameters(self, point=None, store=None, target=None, event=None):
-
has_pole, _ = check_point_keys(point, phrase="*_pole_lat")
if has_pole:
euler_slips = euler_pole2slips(point=point, fault=self, event=event)
@@ -1130,7 +1131,6 @@ class FaultOrdering(object):
"""
def __init__(self, npls, npws, patch_sizes_strike, patch_sizes_dip):
-
self.patch_sizes_dip = patch_sizes_dip
self.patch_sizes_strike = patch_sizes_strike
self.vmap = []
@@ -1300,7 +1300,6 @@ def check_subfault_consistency(a, nsources, parameter):
class InvalidDiscretizationError(Exception):
-
context = (
"Resolution based discretizeation" + " is available for geodetic data only! \n"
)
@@ -1418,7 +1417,6 @@ def get_division_mapping(patch_idxs, div_idxs, subfault_npatches):
count("npatches_old")
count("npatches_new")
for patch_idx in patch_idxs:
-
if patch_idx in div_idxs:
div2new[count("new")] = count("tot")
div2new[count("new")] = count("tot")
@@ -1757,7 +1755,7 @@ def sv_vec2matrix(sv_vec, ndata, nparams):
assert_array_equal(num.array(fault.subfault_npatches), new_subfault_npatches)
if False:
- fig, axs = plt.subplots(2, 3)
+ fig, axs = plt.subplots(2, 3) # noqa: F823
for i, gfidx in enumerate(
num.linspace(0, fault.npatches, 6, dtype="int", endpoint=False)
):
@@ -1770,7 +1768,7 @@ def sv_vec2matrix(sv_vec, ndata, nparams):
10,
num.vstack(gfs_array)[:, gfidx],
edgecolors="none",
- cmap=plt.cm.get_cmap("jet"),
+ cmap=plt.get_cmap("jet"),
)
ax.set_title("Patch idx %i" % gfidx)
@@ -1784,19 +1782,19 @@ def sv_vec2matrix(sv_vec, ndata, nparams):
# U data-space, L singular values, V model space
ndata, nparams = comp_gfs.shape
- U, l, V = svd(comp_gfs, full_matrices=True)
+ U, l_raw, V = svd(comp_gfs, full_matrices=True)
# apply singular value damping
- ldamped_inv = 1.0 / (l + config.epsilon**2)
+ ldamped_inv = 1.0 / (l_raw + config.epsilon**2)
Linv = sv_vec2matrix(ldamped_inv, ndata=ndata, nparams=nparams)
- L = sv_vec2matrix(l, ndata=ndata, nparams=nparams)
+ L = sv_vec2matrix(l_raw, ndata=ndata, nparams=nparams)
# calculate resolution matrix and take trace
if 0:
# for debugging
print("full_GFs", comp_gfs.shape)
print("V", V.shape)
- print("l", l.shape)
+ print("l", l_raw.shape)
print("L", L.shape)
print("Linnv", Linv.shape)
print("U", U.shape)
@@ -1992,14 +1990,12 @@ def sv_vec2matrix(sv_vec, ndata, nparams):
class ResolutionDiscretizationResult(Object):
-
epsilons = List.T(Float.T(), default=[0])
normalized_rspreads = List.T(Float.T(), default=[1.0])
faults_npatches = List.T(Int.T(), default=[1])
optimum = Dict.T(default=dict(), help="Optimum fault discretization parameters")
def plot(self):
-
fig, ax = plt.subplots(1, 1, figsize=mpl_papersize("a6", "landscape"))
ax.plot(
num.array(self.epsilons),
@@ -2028,7 +2024,6 @@ def plot(self):
return fig, ax
def derive_optimum_fault_geometry(self, debug=False):
-
data = num.vstack(
(num.array(self.epsilons), num.array(self.normalized_rspreads))
).T
@@ -2115,7 +2110,6 @@ def optimize_damping(
model_resolutions = []
dfaults = []
for epsilon in epsilons:
-
logger.info("Epsilon: %g", epsilon)
logger.info("--------------")
fault_discr_path = os.path.join(
diff --git a/beat/heart.py b/beat/heart.py
index ae44a2cf..c456f1a2 100644
--- a/beat/heart.py
+++ b/beat/heart.py
@@ -8,12 +8,10 @@
import os
import shutil
from collections import OrderedDict
-from random import choice, choices
from time import time
import numpy as num
-from pymc3 import plots as pmp
-from pyrocko import cake, crust2x2, gf, orthodrome, trace, util
+from pyrocko import cake, crust2x2, gf, orthodrome, trace
from pyrocko.cake import GradientLayer
from pyrocko.fomosto import qseis, qssp
from pyrocko.guts import (
@@ -28,14 +26,15 @@
Tuple,
)
from pyrocko.guts_array import Array
-from pyrocko.model import gnss, Event, get_effective_latlon
+from pyrocko.model import Event, gnss
from pyrocko.moment_tensor import to6
from pyrocko.spit import OutOfBounds
+from pytensor import config as tconfig
+from pytensor import shared
from scipy import linalg
-from theano import config as tconfig
-from theano import shared
from beat import utility
+from beat.defaults import defaults
# from pyrocko.fomosto import qseis2d
@@ -157,7 +156,6 @@ def check_matrix_init(self, cov_mat_str=""):
@property
def c_total(self):
-
self.check_matrix_init("data")
self.check_matrix_init("pred_g")
self.check_matrix_init("pred_v")
@@ -166,7 +164,6 @@ def c_total(self):
@property
def p_total(self):
-
self.check_matrix_init("pred_g")
self.check_matrix_init("pred_v")
@@ -249,13 +246,12 @@ def log_pdet(self):
def update_slog_pdet(self):
"""
Update shared variable with current log_norm_factor (lnf)
- (for theano models).
+ (for pytensor models).
"""
self.slog_pdet.set_value(self.log_pdet)
self.slog_pdet.astype(tconfig.floatX)
def get_min_max_components(self):
-
covmats = []
for comp in self.covs_supported():
covmats.append(getattr(self, comp))
@@ -343,7 +339,6 @@ class Trace(Object):
class FilterBase(Object):
-
lower_corner = Float.T(default=0.001, help="Lower corner frequency")
upper_corner = Float.T(default=0.1, help="Upper corner frequency")
ffactor = Float.T(
@@ -417,7 +412,6 @@ def apply(self, trace):
class FrequencyFilter(FilterBase):
-
tfade = Float.T(
default=20.0,
help="Rise/fall time in seconds of taper applied in timedomain at both"
@@ -515,7 +509,6 @@ def get_taper_frequencies(self):
class PolarityResult(Object):
-
point = ResultPoint.T(default=ResultPoint.D())
processed_obs = Array.T(optional=True)
llk = Float.T(default=0.0, optional=True)
@@ -533,7 +526,6 @@ def processed_syn(self):
def results_for_export(results, datatype=None, attributes=None):
-
if attributes is None:
if datatype is None:
raise ValueError("Either datatype or attributes need to be defined!")
@@ -560,78 +552,7 @@ def results_for_export(results, datatype=None, attributes=None):
sqrt2 = num.sqrt(2.0)
-physical_bounds = dict(
- east_shift=(-500.0, 500.0),
- north_shift=(-500.0, 500.0),
- depth=(0.0, 1000.0),
- strike=(-90.0, 420.0),
- strike1=(-90.0, 420.0),
- strike2=(-90.0, 420.0),
- dip=(-45.0, 135.0),
- dip1=(-45.0, 135.0),
- dip2=(-45.0, 135.0),
- rake=(-180.0, 270.0),
- rake1=(-180.0, 270.0),
- rake2=(-180.0, 270.0),
- mix=(0, 1),
- diameter=(0.0, 100.0),
- sign=(-1.0, 1.0),
- volume_change=(-1e12, 1e12),
- fn=(-1e20, 1e20),
- fe=(-1e20, 1e20),
- fd=(-1e20, 1e20),
- mnn=(-sqrt2, sqrt2),
- mee=(-sqrt2, sqrt2),
- mdd=(-sqrt2, sqrt2),
- mne=(-1.0, 1.0),
- mnd=(-1.0, 1.0),
- med=(-1.0, 1.0),
- exx=(-500.0, 500.0),
- eyy=(-500.0, 500.0),
- exy=(-500.0, 500.0),
- rotation=(-500.0, 500.0),
- w=(-3.0 / 8.0 * num.pi, 3.0 / 8.0 * num.pi),
- v=(-1.0 / 3, 1.0 / 3.0),
- kappa=(0.0, 2 * num.pi),
- sigma=(-num.pi / 2.0, num.pi / 2.0),
- h=(0.0, 1.0),
- length=(0.0, 7000.0),
- width=(0.0, 500.0),
- slip=(0.0, 150.0),
- nucleation_x=(-1.0, 1.0),
- nucleation_y=(-1.0, 1.0),
- opening_fraction=(-1.0, 1.0),
- magnitude=(-5.0, 10.0),
- time=(-300.0, 300.0),
- time_shift=(-40.0, 40.0),
- delta_time=(0.0, 100.0),
- delta_depth=(0.0, 300.0),
- distance=(0.0, 300.0),
- duration=(0.0, 600.0),
- peak_ratio=(0.0, 1.0),
- durations=(0.0, 600.0),
- uparr=(-1.0, 150.0),
- uperp=(-150.0, 150.0),
- utens=(-150.0, 150.0),
- nucleation_strike=(0.0, num.inf),
- nucleation_dip=(0.0, num.inf),
- velocities=(0.0, 20.0),
- azimuth=(0, 360),
- amplitude=(1.0, 10e25),
- bl_azimuth=(0, 360),
- bl_amplitude=(0.0, 0.2),
- locking_depth=(0.1, 100.0),
- hypers=(-20.0, 20.0),
- ramp=(-0.01, 0.01),
- offset=(-1.0, 1.0),
- lat=(-90.0, 90.0),
- lon=(-180.0, 180.0),
- omega=(-10.0, 10.0),
-)
-
-
def list_repeat(arr, repeat=1):
-
if isinstance(repeat, list):
if len(repeat) != arr.size:
raise ValueError(
@@ -678,8 +599,7 @@ class Parameter(Object):
)
def validate_bounds(self):
-
- supported_vars = list(physical_bounds.keys())
+ supported_vars = list(defaults.parameters.keys())
if self.name not in supported_vars:
candidate = self.name.split("_")[-1]
@@ -696,7 +616,7 @@ def validate_bounds(self):
else:
name = self.name
- phys_b = physical_bounds[name]
+ pb_lower, pb_upper = defaults[name].physical_bounds
if self.lower is not None:
for i in range(self.dimension):
if self.upper[i] < self.lower[i]:
@@ -716,7 +636,7 @@ def validate_bounds(self):
% (self.name, i)
)
- if self.upper[i] > phys_b[1] or self.lower[i] < phys_b[0]:
+ if self.upper[i] > pb_upper or self.lower[i] < pb_lower:
raise ValueError(
'The parameter bounds (%f, %f) for "%s" are outside of'
" physically meaningful values (%f, %f)!"
@@ -724,8 +644,8 @@ def validate_bounds(self):
self.lower[i],
self.upper[i],
self.name,
- phys_b[0],
- phys_b[1],
+ pb_lower,
+ pb_upper,
)
)
else:
@@ -773,8 +693,9 @@ def random(self, shape=None):
return (self.get_upper(shape) - lower) * rands + lower
except ValueError:
raise ValueError(
- "Value inconsistency shapes: {} parameter "
- "dimension {}".format(shape, self.dimension)
+ "Value inconsistency shapes: {} parameter " "dimension {}".format(
+ shape, self.dimension
+ )
)
@property
@@ -782,7 +703,7 @@ def dimension(self):
return self.lower.size
def bound_to_array(self):
- return num.array([self.lower, self.testval, self.upper], dtype=num.float)
+ return num.array([self.lower, self.testval, self.upper], dtype=num.float64)
phase_id_mapping = {"any_SH": "any_S", "any_SV": "any_S", "any_P": "any_P"}
@@ -835,7 +756,6 @@ def get_phase_definition(self, store):
return self._phase
def get_takeoff_angle_table(self, source, store):
-
takeoff_angle = store.get_stored_attribute(
phase_id_mapping[self.phase_id],
"takeoff_angle",
@@ -848,7 +768,6 @@ def get_takeoff_angle_table(self, source, store):
return takeoff_angle
def get_takeoff_angle_cake(self, source, store):
-
mod = store.config.earthmodel_1d
rays = mod.arrivals(
phases=self.get_phase_definition(store).phases,
@@ -865,7 +784,6 @@ def get_takeoff_angle_cake(self, source, store):
return takeoff_angle
def update_target(self, engine, source, always_raytrace=False, check=False):
-
self.azimuth_rad = self.azibazi_to(source)[1] * d2r
self.distance = self.distance_to(source)
logger.debug("source distance %f and depth %f", self.distance, source.depth)
@@ -1026,7 +944,6 @@ def __init__(
fmax=5.0,
deltaf=0.1,
):
-
super(SpectrumDataset, self).__init__(
network=network,
station=station,
@@ -1093,7 +1010,6 @@ def get_xdata(self):
class DynamicTarget(gf.Target):
-
response = trace.PoleZeroResponse.T(default=None, optional=True)
domain = StringChoice.T(
default="time",
@@ -1237,8 +1153,8 @@ class GNSSCompoundComponent(GeodeticDataset):
Make synthetics generation more efficient.
"""
- los_vector = Array.T(shape=(None, 3), dtype=num.float, optional=True)
- displacement = Array.T(shape=(None,), dtype=num.float, optional=True)
+ los_vector = Array.T(shape=(None, 3), dtype=float, optional=True)
+ displacement = Array.T(shape=(None,), dtype=float, optional=True)
component = String.T(default="east", help="direction of measurement, north/east/up")
stations = List.T(gnss.GNSSStation.T(optional=True))
covariance = Covariance.T(
@@ -1248,7 +1164,7 @@ class GNSSCompoundComponent(GeodeticDataset):
)
odw = Array.T(
shape=(None,),
- dtype=num.float,
+ dtype=num.float64,
help="Overlapping data weights, additional weight factor to the"
"dataset for overlaps with other datasets",
optional=True,
@@ -1347,7 +1263,9 @@ def get_data_mask(self, corr_config):
"Stations with idxs %s got blacklisted!"
% utility.list2string(station_blacklist_idxs)
)
- return num.array(station_blacklist_idxs)
+ mask = num.ones_like(self.lats, dtype=num.bool_)
+ mask[num.array(station_blacklist_idxs)] = False
+ return mask
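The method now returns a boolean keep-mask over all stations instead of an index array of blacklisted stations; a small illustration of the construction above:

    import numpy as num

    lats = num.zeros(5)
    station_blacklist_idxs = [1, 3]
    mask = num.ones_like(lats, dtype=num.bool_)
    mask[num.array(station_blacklist_idxs)] = False
    print(mask)   # [ True False  True False  True]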
def station_name_index_mapping(self):
if self._station2index is None:
@@ -1358,7 +1276,6 @@ def station_name_index_mapping(self):
@classmethod
def from_pyrocko_gnss_campaign(cls, campaign, components=["north", "east", "up"]):
-
valid_components = ["north", "east", "up"]
compounds = []
@@ -1403,7 +1320,6 @@ def from_pyrocko_gnss_campaign(cls, campaign, components=["north", "east", "up"]
class ResultReport(Object):
-
solution_point = Dict.T(help="result point")
post_llk = StringChoice.T(
choices=["max", "mean", "min"],
@@ -1425,11 +1341,11 @@ class IFG(GeodeticDataset):
master = String.T(optional=True, help="Acquisition time of master image YYYY-MM-DD")
slave = String.T(optional=True, help="Acquisition time of slave image YYYY-MM-DD")
- amplitude = Array.T(shape=(None,), dtype=num.float, optional=True)
- wrapped_phase = Array.T(shape=(None,), dtype=num.float, optional=True)
- incidence = Array.T(shape=(None,), dtype=num.float, optional=True)
- heading = Array.T(shape=(None,), dtype=num.float, optional=True)
- los_vector = Array.T(shape=(None, 3), dtype=num.float, optional=True)
+ amplitude = Array.T(shape=(None,), dtype=num.float64, optional=True)
+ wrapped_phase = Array.T(shape=(None,), dtype=num.float64, optional=True)
+ incidence = Array.T(shape=(None,), dtype=num.float64, optional=True)
+ heading = Array.T(shape=(None,), dtype=num.float64, optional=True)
+ los_vector = Array.T(shape=(None, 3), dtype=num.float64, optional=True)
satellite = String.T(default="Envisat")
def __str__(self):
@@ -1462,7 +1378,7 @@ def update_los_vector(self, force=False):
Se = -num.sin(num.deg2rad(self.incidence)) * num.sin(
num.deg2rad(self.heading - 270)
)
- self.los_vector = num.array([Sn, Se, Su], dtype=num.float).T
+ self.los_vector = num.array([Sn, Se, Su], dtype=num.float64).T
if num.isnan(self.los_vector).any():
raise ValueError(
"There are Nan values in LOS vector for dataset: %s! "
@@ -1479,11 +1395,11 @@ class DiffIFG(IFG):
of synthetics and container for SAR data.
"""
- unwrapped_phase = Array.T(shape=(None,), dtype=num.float, optional=True)
- coherence = Array.T(shape=(None,), dtype=num.float, optional=True)
+ unwrapped_phase = Array.T(shape=(None,), dtype=num.float64, optional=True)
+ coherence = Array.T(shape=(None,), dtype=num.float64, optional=True)
reference_point = Tuple.T(2, Float.T(), optional=True)
reference_value = Float.T(optional=True, default=0.0)
- displacement = Array.T(shape=(None,), dtype=num.float, optional=True)
+ displacement = Array.T(shape=(None,), dtype=num.float64, optional=True)
covariance = Covariance.T(
optional=True,
help=":py:class:`Covariance` that holds data"
@@ -1491,14 +1407,14 @@ class DiffIFG(IFG):
)
odw = Array.T(
shape=(None,),
- dtype=num.float,
+ dtype=num.float64,
help="Overlapping data weights, additional weight factor to the"
"dataset for overlaps with other datasets",
optional=True,
)
mask = Array.T(
shape=(None,),
- dtype=num.bool,
+ dtype=num.bool_,
help="Mask values for Euler pole region determination. "
"Click polygon mask in kite!",
optional=True,
@@ -1554,8 +1470,7 @@ def from_kite_scene(cls, scene, **kwargs):
mask = num.full(lats.size, False)
if polygons:
logger.info(
- "Found polygon mask in %s! Importing for Euler Pole"
- " correction ..." % name
+ "Found polygon mask in %s! Importing for corrections ..." % name
)
from matplotlib.path import Path
@@ -1585,14 +1500,10 @@ def from_kite_scene(cls, scene, **kwargs):
def get_data_mask(self, corr_conf):
"""
- Extracts mask from kite scene and returns mask indexes-
- maybe during import?!!!
+ Returns extracted mask from kite scene
"""
- if corr_conf.feature == "Euler Pole":
- logger.info("Masking data for Euler Pole estimation!")
- return self.mask
- else:
- return None
+ logger.info("Masking data for %s estimation!" % corr_conf.feature)
+ return self.mask
class GeodeticResult(Object):
@@ -1601,9 +1512,9 @@ class GeodeticResult(Object):
"""
point = ResultPoint.T(default=ResultPoint.D())
- processed_obs = Array.T(shape=(None,), dtype=num.float, optional=True)
- processed_syn = Array.T(shape=(None,), dtype=num.float, optional=True)
- processed_res = Array.T(shape=(None,), dtype=num.float, optional=True)
+ processed_obs = Array.T(shape=(None,), dtype=num.float64, optional=True)
+ processed_syn = Array.T(shape=(None,), dtype=num.float64, optional=True)
+ processed_res = Array.T(shape=(None,), dtype=num.float64, optional=True)
llk = Float.T(default=0.0, optional=True)
@@ -1697,6 +1608,7 @@ def get_store_id(prefix, earth_model_name, sample_rate, crust_ind=0):
def init_geodetic_targets(
datasets,
+ event,
earth_model_name="ak135-f-average.m",
interpolation="nearest_neighbor",
crust_inds=[0],
@@ -1711,6 +1623,8 @@ def init_geodetic_targets(
datasets : list
of :class:`heart.GeodeticDataset` for which the targets are being
initialised
+ event : :class:`pyrocko.model.Event`
+ for geographic referencing of the targets
earth_model_name = str
Name of the earth model that has been used for GF calculation.
sample_rate : scalar, float
@@ -1728,10 +1642,15 @@ def init_geodetic_targets(
em_name = get_earth_model_prefix(earth_model_name)
+ for data in datasets:
+ data.update_local_coords(event)
+
targets = [
gf.StaticTarget(
- lons=d.lons,
- lats=d.lats,
+ lons=num.full_like(d.lons, event.lon),
+ lats=num.full_like(d.lons, event.lat),
+ east_shifts=d.east_shifts,
+ north_shifts=d.north_shifts,
interpolation=interpolation,
quantity="displacement",
store_id=get_store_id("statics", em_name, sample_rate, crust_ind),
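The hunk above changes `init_geodetic_targets` to anchor all static targets at the reference event and to carry the individual observation points as Cartesian offsets. A minimal, self-contained sketch of one target built under this convention (event coordinates and observation points are invented for illustration):

    import numpy as num
    from pyrocko import gf, model
    from pyrocko.orthodrome import latlon_to_ne_numpy

    # hypothetical reference event and two observation points
    event = model.Event(lat=40.0, lon=30.0, depth=8e3)
    lats = num.array([40.1, 40.2])
    lons = num.array([30.05, 29.9])
    north_shifts, east_shifts = latlon_to_ne_numpy(event.lat, event.lon, lats, lons)

    # all points share the event's geographic origin; their positions enter
    # through north/east shifts in metres
    target = gf.StaticTarget(
        lats=num.full_like(lons, event.lat),
        lons=num.full_like(lons, event.lon),
        north_shifts=north_shifts,
        east_shifts=east_shifts,
        interpolation="nearest_neighbor",
        quantity="displacement",
    )
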
@@ -1751,7 +1670,6 @@ def init_polarity_targets(
reference_location=None,
wavename="any_P",
):
-
if reference_location is None:
store_prefixes = [copy.deepcopy(station.station) for station in stations]
else:
@@ -2094,11 +2012,16 @@ def get_slowness_taper(fomosto_config, velocity_model, distances):
phases=all_phases, distances=dists, zstart=mean_source_depth
)
- ps = num.array([arrivals[i].p for i in range(len(arrivals))])
+ if len(arrivals) == 0:
+ raise ValueError(
+ "No ray arrivals for tabluated phases in distance depth range! Please double check the coordinates of "
+ "the reference_location under gf_config!"
+ )
+
+ ps = num.array([ray.p for ray in arrivals])
slownesses = ps / (cake.r2d * cake.d2m / km)
smax = slownesses.max()
-
return (0.0, 0.0, 1.1 * float(smax), 1.3 * float(smax))
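For context, the slowness taper derived in this hunk simply brackets the largest ray parameter found for the tabulated phases. A compact stand-alone sketch of the same computation with cake (earth model, phase and distances are placeholders):

    import numpy as num
    from pyrocko import cake

    km = 1000.0
    mod = cake.load_model()                       # default earth model, for illustration
    phases = cake.PhaseDef.classic("P")
    dists = num.linspace(1.0, 10.0, 10)           # epicentral distances [deg]
    arrivals = mod.arrivals(phases=phases, distances=dists, zstart=10 * km)

    ps = num.array([ray.p for ray in arrivals])
    slownesses = ps / (cake.r2d * cake.d2m / km)  # convert ray parameters to slowness
    smax = slownesses.max()
    slowness_taper = (0.0, 0.0, 1.1 * float(smax), 1.3 * float(smax))
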
@@ -2462,7 +2385,10 @@ def polarity_construct_gf(
if not os.path.exists(phases_dir) or force:
store = gf.Store(store_dir, "r")
if polgf.always_raytrace:
- logger.info("Creating dummy store ...")
+ logger.info(
+ "Enabled `always_raytrace` flag - Creating dummy store without "
+ "take-off angles. Please disable flag to calculate!"
+ )
else:
logger.info("Calculating interpolation tables ...")
store.make_travel_time_tables(force=force)
@@ -2472,7 +2398,7 @@ def polarity_construct_gf(
# create dummy files for engine to recognize the store
for fn in ["index", "traces"]:
dummy_fn = os.path.join(store_dir, fn)
- with open(dummy_fn, "a") as f:
+ with open(dummy_fn, "a"):
pass
else:
logger.info("Phases exist use force=True to overwrite!")
@@ -2677,7 +2603,6 @@ def get_phase_taperer(
class BaseMapping(object):
def __init__(self, stations, targets, mapnumber=0):
-
self.name = "base"
self.mapnumber = mapnumber
self.stations = stations
@@ -2755,7 +2680,6 @@ def n_data(self):
return len(self.datasets)
def get_target_idxs(self, channels=["Z"]):
-
t2i = self.target_index_mapping()
dtargets = utility.gather(self.targets, lambda t: t.codes[3])
@@ -2777,7 +2701,6 @@ def _check_specific_consistency(self):
class PolarityMapping(BaseMapping):
def __init__(self, config, stations, targets, mapnumber=0):
-
BaseMapping.__init__(self, stations, targets, mapnumber)
self.config = config
@@ -2787,7 +2710,6 @@ def __init__(self, config, stations, targets, mapnumber=0):
self._radiation_weights = None
def get_station_names_data(self):
-
if self.datasets is None:
self.datasets = self._load_phase_markers(self.config.polarities_marker_path)
@@ -2799,7 +2721,6 @@ def get_station_names_data(self):
return station_names
def get_polarities(self):
-
if self.datasets is None:
self.datasets = self._load_phase_markers(self.config.polarities_marker_path)
@@ -2809,7 +2730,6 @@ def get_polarities(self):
)
def get_station_names_without_data(self):
-
blacklist = []
station_names = self.get_station_names()
dataset_station_names = self.get_station_names_data()
@@ -2894,7 +2814,6 @@ def shared_data_array(self):
return shared(self._prepared_data, name="%s_data" % self.name, borrow=True)
def update_targets(self, engine, source, always_raytrace=False, check=False):
-
for target in self.targets:
target.update_target(
engine, source, always_raytrace=always_raytrace, check=check
@@ -2954,7 +2873,7 @@ class WaveformMapping(BaseMapping):
stations : list
of :class:`pyrocko.model.Station`
weights : list
- of theano.shared variables
+ of pytensor.shared variables
channels : list
of channel names valid for all the stations of this wavemap
datasets : list
@@ -2975,7 +2894,6 @@ def __init__(
deltat=None,
mapnumber=0,
):
-
BaseMapping.__init__(self, stations, targets, mapnumber)
self.name = name
@@ -3041,7 +2959,6 @@ def station_weeding(self, event, distances, blacklist=[]):
self.check_consistency()
def get_station_names_without_data(self):
-
blacklist = []
station_names = self.get_station_names()
dataset_station_names = [utility.get_ns_id(tr.nslc_id) for tr in self.datasets]
@@ -3085,7 +3002,6 @@ def hypersize(self):
)
def get_marker_arrival_times(self):
-
if self._phase_markers is None:
try:
self._load_phase_markers(self.config.arrivals_marker_path)
@@ -3172,14 +3088,12 @@ def get_nsamples_time(self, chop_bounds=["b", "c"], pad_to_pow2=False):
return nsamples
def get_nsamples_spectrum(self, chop_bounds=["b", "c"], pad_to_pow2=True):
-
lower_idx, upper_idx = self.get_valid_spectrum_indices(
chop_bounds=chop_bounds, pad_to_pow2=pad_to_pow2
)
return upper_idx - lower_idx
def get_valid_spectrum_indices(self, chop_bounds=["b", "c"], pad_to_pow2=True):
-
valid_spectrum_indices = utility.get_valid_spectrum_data(
deltaf=self.get_deltaf(chop_bounds, pad_to_pow2),
taper_frequencies=self.get_taper_frequencies(),
@@ -3200,7 +3114,6 @@ def shared_data_array(self):
return shared(self._prepared_data, name="%s_data" % self.name, borrow=True)
def _check_specific_consistency(self):
-
if self.n_t == 0:
raise CollectionError(
'No data left in mapping "%s" after applying the distance '
@@ -3258,7 +3171,6 @@ def __init__(self, stations, waveforms=None, target_deltat=None):
self._station2index = None
def adjust_sampling_datasets(self, deltat, snap=False, force=False):
-
for tr in self._raw_datasets.values():
if tr.nslc_id not in self._datasets or force:
self._datasets[tr.nslc_id] = utility.downsample_trace(
@@ -3322,7 +3234,6 @@ def add_waveforms(self, waveforms=[], force=False):
self.waveforms.append(waveform)
def add_responses(self, responses, location=None):
-
self._responses = OrderedDict()
for k, v in responses.items():
@@ -3334,7 +3245,6 @@ def add_responses(self, responses, location=None):
self._responses[k] = v
def add_targets(self, targets, replace=False, force=False):
-
if replace:
self._targets = OrderedDict()
@@ -3346,7 +3256,6 @@ def add_targets(self, targets, replace=False, force=False):
logger.warn("Target %s already in collection!" % str(target.codes))
def add_datasets(self, datasets, location=None, replace=False, force=False):
-
if replace:
self._datasets = OrderedDict()
self._raw_datasets = OrderedDict()
@@ -3367,7 +3276,6 @@ def n_data(self):
return len(self._datasets.keys())
def get_waveform_mapping(self, waveform, config):
-
### Mahdi, need to sort domain stuff
self._check_collection(waveform, errormode="not_in")
@@ -3905,7 +3813,6 @@ def seis_derivative(
n_stencil_steps = len(stencil)
tmp = num.zeros((n_stencil_steps, ntargets, nsamples), dtype="float64")
for i, hstep in enumerate(stencil.hsteps):
-
if parameter in spatial_derivative_parameters:
target_param_name = spatial_derivative_parameters[parameter]
diff_targets = []
@@ -4166,7 +4073,6 @@ def pol_synthetics(
def fft_transforms(
time_domain_signals, valid_spectrum_indices, outmode="array", pad_to_pow2=True
):
-
if outmode not in stackmodes:
raise StackingError(
'Outmode "%s" not available! Available: %s'
@@ -4184,7 +4090,6 @@ def fft_transforms(
spec_signals = [None] * n_data
for i, tr in enumerate(time_domain_signals):
-
if outmode == "array":
n_samples = len(tr)
ydata = tr
@@ -4198,7 +4103,6 @@ def fft_transforms(
spec_signals[i] = num.abs(fydata)[lower_idx:upper_idx]
elif outmode in ["data", "tapered_data", "stacked_traces"]:
-
if outmode == "tapered_data":
tr = tr[0]
@@ -4265,9 +4169,19 @@ def geo_synthetics(
'stacked_arrays'
or list of
:class:`numpy.ndarray` (target.samples; ux-North, uy-East, uz-Down)
+ returns Nan in displacements if result is invalid!
"""
+ if False:
+ # for debugging
+ for source in sources:
+ print(source)
+
+ for target in targets:
+ print(target)
+
response = engine.process(sources, targets)
+
ns = len(sources)
nt = len(targets)
@@ -4277,10 +4191,10 @@ def stack_arrays(targets, disp_arrays):
for target in targets:
sapp(num.zeros([target.lons.size, 3]))
- for k in range(ns):
- for l in range(nt):
- idx = l + (k * nt)
- stacked_arrays[l] += disp_arrays[idx]
+ for i_s in range(ns):
+ for i_t in range(nt):
+ idx = i_t + (i_s * nt)
+ stacked_arrays[i_t] += disp_arrays[idx]
return stacked_arrays
@@ -4462,7 +4376,6 @@ def cartesian_to_local(lat, lon):
class StrainRateTensor(Object):
-
exx = Float.T(default=10)
eyy = Float.T(default=0)
exy = Float.T(default=0)
@@ -4572,11 +4485,11 @@ def get_ramp_displacement(locx, locy, azimuth_ramp, range_ramp, offset):
local coordinates [km] in east direction
locy : shared array-like :class:`numpy.ndarray`
local coordinates [km] in north direction
- azimuth_ramp : :class:`theano.tensor.Tensor` or :class:`numpy.ndarray`
+ azimuth_ramp : :class:`pytensor.tensor.Tensor` or :class:`numpy.ndarray`
vector with ramp parameter in azimuth
- range_ramp : :class:`theano.tensor.Tensor` or :class:`numpy.ndarray`
+ range_ramp : :class:`pytensor.tensor.Tensor` or :class:`numpy.ndarray`
vector with ramp parameter in range
- offset : :class:`theano.tensor.Tensor` or :class:`numpy.ndarray`
+ offset : :class:`pytensor.tensor.Tensor` or :class:`numpy.ndarray`
scalar of offset in [m]
"""
return locy * azimuth_ramp + locx * range_ramp + offset
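The ramp correction documented above is just a bilinear surface in the local scene coordinates; a tiny numpy example with invented parameter values (not part of the patch):

    import numpy as num

    # local pixel coordinates [km], illustrative only
    locx = num.array([0.0, 5.0, 10.0])   # east
    locy = num.array([0.0, 2.0, 4.0])    # north

    azimuth_ramp, range_ramp, offset = 1e-4, 2e-4, 0.01
    ramp = locy * azimuth_ramp + locx * range_ramp + offset
    # -> array([0.01, 0.0112, 0.0124]), the displacement [m] attributed to the ramp
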
diff --git a/beat/inputf.py b/beat/inputf.py
index 972686fa..6d3dc959 100644
--- a/beat/inputf.py
+++ b/beat/inputf.py
@@ -188,7 +188,6 @@ def load_ascii_gnss_globk(filedir, filename, components=["east", "north", "up"])
def load_repsonses_from_file(projectpath):
-
network = ""
location = ""
@@ -426,7 +425,7 @@ def rotate_traces_and_stations(datatraces, stations, event):
try:
traces = station2traces[station.station]
- except (KeyError):
+ except KeyError:
logger.warning(
'Did not find data traces for station "%s"' % stations.station
)
diff --git a/beat/interseismic.py b/beat/interseismic.py
deleted file mode 100644
index c245a754..00000000
--- a/beat/interseismic.py
+++ /dev/null
@@ -1,334 +0,0 @@
-"""
-Module for interseismic models.
-
-Block-backslip model
---------------------
-The fault is assumed to be locked above a certain depth "locking_depth" and
-it is creeping with the rate of the defined plate- which is handled as a
-rigid block.
-
-STILL EXPERIMENTAL!
-
-References
-==========
-Savage & Prescott 1978
-Metzger et al. 2011
-"""
-
-import copy
-import logging
-
-import numpy as num
-from matplotlib.path import Path
-from pyrocko.gf import RectangularSource as RS
-from pyrocko.orthodrome import earthradius, latlon_to_ne_numpy, latlon_to_xyz
-
-from beat import utility
-from beat.heart import geo_synthetics
-
-logger = logging.getLogger("interseismic")
-
-km = 1000.0
-d2r = num.pi / 180.0
-r2d = 180.0 / num.pi
-
-non_source = set(["amplitude", "azimuth", "locking_depth"])
-
-
-__all__ = ["geo_backslip_synthetics"]
-
-
-def block_mask(easts, norths, sources, east_ref, north_ref):
- """
- Determine stable and moving observation points dependent on the input
- fault orientation.
-
- Parameters
- ----------
- easts : :class:`numpy.ndarray`
- east - local coordinates [m] of observations
- norths : :class:`numpy.ndarray`
- north - local coordinates [m] of observations
- sources : list
- of :class:`RectangularSource`
- east_ref : float
- east local coordinate [m] of stable reference
- north_ref : float
- north local coordinate [m] of stable reference
-
- Returns
- -------
- :class:`numpy.ndarray` with zeros at stable points, ones at moving points
- """
-
- def get_vertex(outlines, i, j):
- f1 = outlines[i]
- f2 = outlines[j]
- print(f1, f2)
- return utility.line_intersect(f1[0, :], f1[1, :], f2[0, :], f2[1, :])
-
- tol = 2.0 * km
-
- Eline = RS(
- east_shift=easts.max() + tol,
- north_shift=0.0,
- strike=0.0,
- dip=90.0,
- length=1 * km,
- )
- Nline = RS(
- east_shift=0.0,
- north_shift=norths.max() + tol,
- strike=90,
- dip=90.0,
- length=1 * km,
- )
- Sline = RS(
- east_shift=0.0,
- north_shift=norths.min() - tol,
- strike=90,
- dip=90.0,
- length=1 * km,
- )
-
- frame = [Nline, Eline, Sline]
-
- # collect frame lines
- outlines = []
- for source in sources + frame:
- outline = source.outline(cs="xy")
- outlines.append(utility.swap_columns(outline, 0, 1)[0:2, :])
-
- # get polygon vertices
- poly_vertices = []
- for i in range(len(outlines) - 1):
- poly_vertices.append(get_vertex(outlines, i, i + 1))
- else:
- poly_vertices.append(get_vertex(outlines, 0, -1))
-
- print(poly_vertices, outlines)
- polygon = Path(num.vstack(poly_vertices), closed=True)
-
- ens = num.vstack([easts.flatten(), norths.flatten()]).T
- ref_en = num.array([east_ref, north_ref]).flatten()
- print(ens)
- mask = polygon.contains_points(ens)
-
- if not polygon.contains_point(ref_en):
- return mask
-
- else:
- return num.logical_not(mask)
-
-
-def block_geometry(lons, lats, sources, reference):
- """
- Construct block geometry determine stable and moving parts dependent
- on the reference location.
-
- Parameters
- ----------
- lons : :class:`num.ndarray`
- Longitudes [deg] of observation points
- lats : :class:`num.ndarray`
- Latitudes [deg] of observation points
- sources : list
- of RectangularFault objects
- reference : :class:`heart.ReferenceLocation`
- reference location that determines the stable block
-
- Returns
- -------
- :class:`num.ndarray`
- mask with zeros/ones for stable/moving observation points, respectively
- """
-
- norths, easts = latlon_to_ne_numpy(reference.lat, reference.lon, lats, lons)
-
- return block_mask(easts, norths, sources, east_ref=0.0, north_ref=0.0)
-
-
-def block_movement(bmask, amplitude, azimuth):
- """
- Get block movements. Assumes one side of the model stable, therefore
- the moving side is moving 2 times the given amplitude.
-
- Parameters
- ----------
- bmask : :class:`numpy.ndarray`
- masked block determining stable and moving observation points
- amplitude : float
- slip [m] of the moving block
- azimuth : float
- azimuth-angle[deg] ergo direction of moving block towards North
-
- Returns
- -------
- :class:`numpy.ndarray`
- (n x 3) [North, East, Down] displacements [m]
- """
-
- tmp = num.repeat(bmask * 2.0 * float(amplitude), 3).reshape((bmask.shape[0], 3))
- sv = utility.strike_vector(float(azimuth), order="NEZ")
- return tmp * sv
-
-
-def geo_block_synthetics(lons, lats, sources, amplitude, azimuth, reference):
- """
- Block model: forward model for synthetic displacements(n,e,d) [m] caused by
- a rigid moving block defined by the bounding geometry of rectangular
- faults. The reference location determines the stable regions.
- The amplitude and azimuth determines the amount and direction of the
- moving block.
-
- Parameters
- ----------
- lons : :class:`num.ndarray`
- Longitudes [deg] of observation points
- lats : :class:`num.ndarray`
- Latitudes [deg] of observation points
- sources : list
- of RectangularFault objects
- amplitude : float
- slip [m] of the moving block
- azimuth : float
- azimuth-angle[deg] ergo direction of moving block towards North
- reference : :class:`heart.ReferenceLocation`
- reference location that determines the stable block
-
- Returns
- -------
- :class:`numpy.ndarray`
- (n x 3) [North, East, Down] displacements [m]
- """
- bmask = block_geometry(lons, lats, sources, reference)
- return block_movement(bmask, amplitude, azimuth)
-
-
-def backslip_params(azimuth, strike, dip, amplitude, locking_depth):
- """
- Transforms the interseismic blockmodel parameters to fault input parameters
- for the backslip model.
-
- Parameters
- ----------
- azimuth : float
- azimuth [deg] of the block-motion towards the North
- strike : float
- strike-angle[deg] of the backslipping fault
- dip : float
- dip-angle[deg] of the back-slipping fault
- amplitude : float
- slip rate of the blockmodel [m/yr]
- locking_depth : float
- locking depth [km] of the fault
-
- Returns
- -------
- dict of parameters for the back-slipping RectangularSource
- """
- if dip == 0.0:
- raise ValueError("Dip must not be zero!")
-
- az_vec = utility.strike_vector(azimuth)
- strike_vec = utility.strike_vector(strike)
- alpha = num.arccos(az_vec.dot(strike_vec))
- alphad = alpha * r2d
-
- sdip = num.sin(dip * d2r)
-
- # assuming dip-slip is zero --> strike slip = slip
- slip = num.abs(amplitude * num.cos(alpha))
- opening = -amplitude * num.sin(alpha) * sdip
-
- if alphad < 90.0 and alphad >= 0.0:
- rake = 0.0
- elif alphad >= 90.0 and alphad <= 180.0:
- rake = 180.0
- else:
- raise Exception("Angle between vectors inconsistent!")
-
- width = locking_depth * km / sdip
-
- return dict(
- slip=float(slip),
- opening=float(opening),
- width=float(width),
- depth=0.0,
- rake=float(rake),
- )
-
-
-def geo_backslip_synthetics(
- engine, sources, targets, lons, lats, reference, amplitude, azimuth, locking_depth
-):
- """
- Interseismic backslip model: forward model for synthetic
- displacements(n,e,d) [m] caused by a rigid moving block defined by the
- bounding geometry of rectangular faults. The reference location determines
- the stable regions. The amplitude and azimuth determines the amount and
- direction of the moving block.
- Based on this block-movement the upper part of the crust that is not locked
- is assumed to slip back. Thus the final synthetics are the superposition
- of the block-movement and the backslip.
-
- Parameters
- ----------
- engine : :class:`pyrocko.gf.seismosizer.LocalEngine`
- sources : list
- of :class:`pyrocko.gf.seismosizer.RectangularSource`
- Sources to calculate synthetics for
- targets : list
- of :class:`pyrocko.gf.targets.StaticTarget`
- lons : list of floats, or :class:`numpy.ndarray`
- longitudes [deg] of observation points
- lats : list of floats, or :class:`numpy.ndarray`
- latitudes [deg] of observation points
- amplitude : float
- slip [m] of the moving block
- azimuth : float
- azimuth-angle[deg] ergo direction of moving block towards North
- locking_depth : :class:`numpy.ndarray`
- locking_depth [km] of the fault(s) below there is no movement
- reference : :class:`heart.ReferenceLocation`
- reference location that determines the stable block
-
- Returns
- -------
- :class:`numpy.ndarray`
- (n x 3) [North, East, Down] displacements [m]
- """
-
- disp_block = geo_block_synthetics(
- lons, lats, sources, amplitude, azimuth, reference
- )
-
- for source, ld in zip(sources, locking_depth):
- source_params = backslip_params(
- azimuth=azimuth,
- amplitude=amplitude,
- locking_depth=ld,
- strike=source.strike,
- dip=source.dip,
- )
- source.update(**source_params)
-
- disp_block += geo_synthetics(
- engine=engine, targets=targets, sources=sources, outmode="stacked_array"
- )
-
- return disp_block
-
-
-def seperate_point(point):
- """
- Separate point into source object related components and the rest.
- """
- tpoint = copy.deepcopy(point)
-
- interseismic_point = {}
- for var in non_source:
- if var in tpoint.keys():
- interseismic_point[var] = tpoint.pop(var)
-
- return tpoint, interseismic_point
diff --git a/beat/models/base.py b/beat/models/base.py
index 98348ac9..a975dc66 100644
--- a/beat/models/base.py
+++ b/beat/models/base.py
@@ -3,12 +3,12 @@
from logging import getLogger
import numpy as num
-from pymc3 import Deterministic
+from pymc import Deterministic, Uniform
from pyrocko.util import ensuredir
from beat import config as bconfig
from beat.backend import SampleStage, thin_buffer
-from beat.models.distributions import hyper_normal, get_hyper_name
+from beat.models.distributions import get_hyper_name, hyper_normal
logger = getLogger("models.base")
@@ -36,6 +36,18 @@ def get_hypervalue_from_point(point, observe, counter, hp_specific=False):
return hp
+def init_uniform_random(kwargs):
+ try:
+ dist = Uniform(**kwargs)
+ except TypeError:
+ kwargs.pop("name")
+ kwargs.pop("initval")
+ kwargs.pop("transform")
+ dist = Uniform.dist(**kwargs)
+
+ return dist
+
+
class ConfigInconsistentError(Exception):
def __init__(self, errmess="", params="hierarchicals"):
self.default = (
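The new `init_uniform_random` helper above wraps a pymc v5 detail: `Uniform(...)` only works inside a model context and raises a `TypeError` otherwise, in which case the model-only keyword arguments are stripped and `Uniform.dist(...)` is returned instead. A hedged usage sketch mirroring how the composites call it (parameter name and bounds are invented):

    import numpy as num
    from pytensor import config as tconfig
    from beat.models.base import init_uniform_random

    # hypothetical prior bounds for a two-component ramp parameter
    kwargs = dict(
        name="azimuth_ramp",
        shape=2,
        lower=num.array([-1e-3, -1e-3]),
        upper=num.array([1e-3, 1e-3]),
        initval=num.array([0.0, 0.0]),
        transform=None,
        dtype=tconfig.floatX,
    )

    # inside `with pymc.Model():` this yields a named random variable,
    # outside a model context the helper falls back to Uniform.dist()
    rv = init_uniform_random(kwargs)
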
@@ -59,7 +71,6 @@ class Composite(object):
"""
def __init__(self, events):
-
self.input_rvs = OrderedDict()
self.fixed_rvs = OrderedDict()
self.hierarchicals = OrderedDict()
@@ -114,7 +125,7 @@ def apply(self, weights):
Parameters
----------
- list : of Theano shared variables
+ list : of Pytensor shared variables
containing weight matrixes to use for updates
"""
@@ -184,7 +195,7 @@ def sample(step, problem):
Parameters
----------
- step : :class:`SMC` or :class:`pymc3.metropolis.Metropolis`
+ step : :class:`SMC` or :class:`pymc.metropolis.Metropolis`
from problem.init_sampler()
problem : :class:`Problem` with characteristics of problem to solve
"""
@@ -210,7 +221,10 @@ def sample(step, problem):
start = []
for i in tqdm(range(step.n_chains)):
point = problem.get_random_point()
- start.append(problem.lsq_solution(point))
+ # print(point)
+ lsq_point = problem.lsq_solution(point)
+ # print("lsq", lsq_point)
+ start.append(lsq_point)
else:
start = None
@@ -374,7 +388,6 @@ class Stage(object):
mtrace = None
def __init__(self, handler=None, homepath=None, stage_number=-1, backend="csv"):
-
if handler is not None:
self.handler = handler
elif handler is None and homepath is not None:
@@ -393,7 +406,7 @@ def load_results(
Parameters
----------
- model : :class:`pymc3.model.Model`
+ model : :class:`pymc.model.Model`
stage_number : int
Number of stage to load
chains : list, optional
@@ -443,7 +456,6 @@ def load_results(
def load_stage(problem, stage_number, load="trace", chains=[-1]):
-
stage = Stage(
homepath=problem.outfolder,
stage_number=stage_number,
diff --git a/beat/models/corrections.py b/beat/models/corrections.py
index 4aaabd5f..4038fb82 100644
--- a/beat/models/corrections.py
+++ b/beat/models/corrections.py
@@ -1,17 +1,17 @@
import logging
from collections import OrderedDict
-from numpy import array, zeros
+from numpy import array
from pyrocko import orthodrome
-from theano import config as tconfig
-from theano import shared
+from pytensor import config as tconfig
+from pytensor import shared
from beat.heart import (
get_ramp_displacement,
velocities_from_pole,
velocities_from_strain_rate_tensor,
)
-from beat.theanof import EulerPole, StrainRateTensor
+from beat.pytensorf import EulerPole, StrainRateTensor
logger = logging.getLogger("models.corrections")
@@ -50,7 +50,6 @@ def get_required_coordinate_names(self):
def setup_correction(
self, locy, locx, los_vector, data_mask, dataset_name, number=0
):
-
self.east_shifts = locx
self.north_shifts = locy
@@ -95,7 +94,6 @@ def get_required_coordinate_names(self):
def setup_correction(
self, locy, locx, los_vector, data_mask, dataset_name, number=0
):
-
self.los_vector = los_vector
self.lats = locy
self.lons = locx
@@ -117,7 +115,7 @@ def get_displacements(self, hierarchicals, point=None):
if not self.correction_names:
raise ValueError("Requested correction, but is not setup or configured!")
- if not point: # theano instance for get_formula
+ if not point: # pytensor instance for get_formula
inputs = OrderedDict()
for corr_name in self.correction_names:
inputs[corr_name] = hierarchicals[corr_name]
@@ -149,7 +147,6 @@ def get_required_coordinate_names(self):
def setup_correction(
self, locy, locx, los_vector, data_mask, dataset_name, number=0
):
-
self.los_vector = los_vector
self.lats = locy
self.lons = locx
@@ -164,14 +161,11 @@ def setup_correction(
self.los_vector.astype(tconfig.floatX), name="los", borrow=True
)
- def get_station_indexes(self):
- return array(self.strain_rate_tensor.station_idxs)
-
- def get_station_coordinates(self, indexes=None):
- if indexes is None:
- indexes = self.get_station_indexes()
+ def get_station_coordinates(self, mask=None):
+ if mask is None:
+ mask = self.data_mask
- return array(self.lats)[indexes], array(self.lons)[indexes]
+ return array(self.lats)[mask], array(self.lons)[mask]
def get_displacements(self, hierarchicals, point=None):
"""
@@ -180,7 +174,7 @@ def get_displacements(self, hierarchicals, point=None):
if not self.correction_names:
raise ValueError("Requested correction, but is not setup or configured!")
- if not point: # theano instance for get_formula
+ if not point: # pytensor instance for get_formula
inputs = OrderedDict()
for corr_name in self.correction_names:
inputs[corr_name] = hierarchicals[corr_name]
@@ -200,15 +194,11 @@ def get_displacements(self, hierarchicals, point=None):
kwargs = self.get_point_rvs(hierarchicals)
- valid = self.get_station_indexes()
- lats, lons = self.get_station_coordinates(valid)
-
- v_xyz = velocities_from_strain_rate_tensor(lats, lons, **kwargs)
+ v_xyz = velocities_from_strain_rate_tensor(
+ array(self.lats), array(self.lons), **kwargs
+ )
- if valid.size > 0:
- vels = zeros((self.lats.size, 3))
- vels[valid, :] = v_xyz
- else:
- vels = v_xyz
+ if self.data_mask.size > 0:
+ v_xyz[self.data_mask, :] = 0.0
- return (vels * self.los_vector).sum(axis=1)
+ return (v_xyz * self.los_vector).sum(axis=1)
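Mechanically, the strain-rate correction now computes velocities at every point, zeroes the masked entries, and projects onto the line of sight. A small numpy illustration of those last two steps (component order North, East, Up; all numbers invented):

    import numpy as num

    # predicted surface velocities at two points, columns: North, East, Up
    v_xyz = num.array([[0.010, 0.002, 0.001],
                       [0.008, 0.003, 0.000]])

    # unit line-of-sight vectors for the same points
    los_vector = num.array([[-0.09, -0.62, 0.78],
                            [-0.09, -0.62, 0.78]])

    # entries flagged by the data mask are excluded from the correction
    data_mask = num.array([False, True])
    v_xyz[data_mask, :] = 0.0

    los_velocity = (v_xyz * los_vector).sum(axis=1)   # per-point dot product
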
diff --git a/beat/models/distributions.py b/beat/models/distributions.py
index 30c39acb..3bb323d7 100644
--- a/beat/models/distributions.py
+++ b/beat/models/distributions.py
@@ -1,10 +1,9 @@
from logging import getLogger
import numpy as num
-import theano.tensor as tt
-from theano import config as tconfig
-from theano import shared
-from theano.printing import Print
+import pytensor.tensor as tt
+from pytensor import config as tconfig
+from pytensor import shared
from beat.utility import Counter
@@ -38,10 +37,10 @@ def multivariate_normal(datasets, weights, hyperparams, residuals):
datasets : list
of :class:`heart.SeismicDataset` or :class:`heart.GeodeticDataset`
weights : list
- of :class:`theano.shared`
+ of :class:`pytensor.shared`
Square matrix of the inverse of the covariance matrix as weights
hyperparams : dict
- of :class:`theano.`
+ of :class:`pytensor.`
residual : list or array of model residuals
Returns
@@ -52,18 +51,18 @@ def multivariate_normal(datasets, weights, hyperparams, residuals):
logpts = tt.zeros((n_t), tconfig.floatX)
- for l, data in enumerate(datasets):
+ for i_l, data in enumerate(datasets):
M = tt.cast(shared(data.samples, name="nsamples", borrow=True), "int16")
hp_name = get_hyper_name(data)
norm = M * (2 * hyperparams[hp_name] + log_2pi)
logpts = tt.set_subtensor(
- logpts[l : l + 1],
+ logpts[i_l : i_l + 1],
(-0.5)
* (
data.covariance.slog_pdet
+ norm
+ (1 / tt.exp(hyperparams[hp_name] * 2))
- * (residuals[l].dot(weights[l]).dot(residuals[l].T))
+ * (residuals[i_l].dot(weights[i_l]).dot(residuals[i_l].T))
),
)
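Stripped of the pytensor bookkeeping, each dataset's term assembled in this loop is a multivariate normal log-density whose covariance is scaled by a log-standard-deviation hyperparameter. A plain-numpy sketch of one such term (argument names are illustrative; `inv_cov` plays the role of `weights`):

    import numpy as num

    def dataset_logp(residual, inv_cov, slog_pdet, hp):
        """Log-likelihood contribution of one dataset; hp is the log-std hyperparameter."""
        M = residual.size
        norm = M * (2 * hp + num.log(2 * num.pi))
        maha = residual.dot(inv_cov).dot(residual.T)   # Mahalanobis-type misfit
        return -0.5 * (slog_pdet + norm + num.exp(-2 * hp) * maha)
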
@@ -84,11 +83,11 @@ def multivariate_normal_chol(
datasets : list
of :class:`heart.SeismicDataset` or :class:`heart.GeodeticDataset`
weights : list
- of :class:`theano.shared`
+ of :class:`pytensor.shared`
Square matrix of the inverse of the lower triangular matrix of a
cholesky decomposed covariance matrix
hyperparams : dict
- of :class:`theano.`
+ of :class:`pytensor.`
residual : list or array of model residuals
hp_specific : boolean
if true, the hyperparameters have to be arrays size equal to
@@ -107,7 +106,7 @@ def multivariate_normal_chol(
adapted from https://www.quora.com/What-is-the-role-of-the-Cholesky-decomposition-in-finding-multivariate-normal-PDF
"""
if sparse:
- import theano.sparse as ts
+ import pytensor.sparse as ts
dot = ts.dot
else:
@@ -117,7 +116,7 @@ def multivariate_normal_chol(
logpts = tt.zeros((n_t), tconfig.floatX)
count = Counter()
- for l, data in enumerate(datasets):
+ for i_l, data in enumerate(datasets):
M = tt.cast(shared(data.samples, name="nsamples", borrow=True), "int16")
hp_name = get_hyper_name(data)
@@ -126,10 +125,10 @@ def multivariate_normal_chol(
else:
hp = hyperparams[hp_name]
- tmp = dot(weights[l], (residuals[l]))
+ tmp = dot(weights[i_l], (residuals[i_l]))
norm = M * (2 * hp + log_2pi)
logpts = tt.set_subtensor(
- logpts[l : l + 1],
+ logpts[i_l : i_l + 1],
(-0.5)
* (
data.covariance.slog_pdet
@@ -183,7 +182,7 @@ def hyper_normal(datasets, hyperparams, llks, hp_specific=False):
datasets : list
of :class:`heart.SeismicDatset` or :class:`heart.GeodeticDataset`
hyperparams : dict
- of :class:`theano.`
+ of :class:`pytensor.`
llks : posterior likelihoods
hp_specific : boolean
if true, the hyperparameters have to be arrays size equal to
diff --git a/beat/models/geodetic.py b/beat/models/geodetic.py
index 7d041054..ad67ad99 100644
--- a/beat/models/geodetic.py
+++ b/beat/models/geodetic.py
@@ -5,23 +5,22 @@
from time import time
import numpy as num
-import theano.tensor as tt
-from pymc3 import Deterministic, Uniform
-from pyrocko.gf import LocalEngine, RectangularSource
-from theano import config as tconfig
-from theano import shared
-from theano.printing import Print
+import pytensor.tensor as tt
+from pymc import Deterministic
+from pyrocko.gf import LocalEngine
+from pytensor import config as tconfig
+from pytensor import shared
from beat import config as bconfig
from beat import covariance as cov
-from beat import heart, theanof, utility
+from beat import heart, pytensorf, utility
from beat.ffi import get_gf_prefix, load_gf_library
-from beat.interseismic import geo_backslip_synthetics, seperate_point
from beat.models.base import (
Composite,
ConfigInconsistentError,
FaultGeometryNotFoundError,
get_hypervalue_from_point,
+ init_uniform_random,
)
from beat.models.distributions import multivariate_normal_chol
@@ -32,8 +31,8 @@
__all__ = [
+ "GeodeticBEMComposite",
"GeodeticGeometryComposite",
- "GeodeticInterseismicComposite",
"GeodeticDistributerComposite",
]
@@ -60,7 +59,6 @@ class GeodeticComposite(Composite):
weights = None
def __init__(self, gc, project_dir, events, hypers=False):
-
super(GeodeticComposite, self).__init__(events)
logger.debug("Setting up geodetic structure ...\n")
@@ -72,9 +70,22 @@ def __init__(self, gc, project_dir, events, hypers=False):
self.datasets = utility.load_objects(geodetic_data_path)
logger.info("Number of geodetic datasets: %i " % self.n_t)
+ # initialise local coordinate system and corrections
+ if gc.corrections_config.has_enabled_corrections:
+ correction_configs = gc.corrections_config.iter_corrections()
+ logger.info("Initialising corrections ...")
+ for data in self.datasets:
+ data.setup_corrections(
+ event=self.event, correction_configs=correction_configs
+ )
+ else:
+ for data in self.datasets:
+ data.update_local_coords(self.event)
+
# init geodetic targets
self.targets = heart.init_geodetic_targets(
datasets=self.datasets,
+ event=self.event,
earth_model_name=gc.gf_config.earth_model_name,
interpolation=gc.interpolation,
crust_inds=[gc.gf_config.reference_model_idx],
@@ -95,22 +106,14 @@ def __init__(self, gc, project_dir, events, hypers=False):
config=gc.noise_estimator, events=self.events
)
- if gc.corrections_config.has_enabled_corrections:
- correction_configs = gc.corrections_config.iter_corrections()
- logger.info("Initialising corrections ...")
- for data in self.datasets:
- data.setup_corrections(
- event=self.event, correction_configs=correction_configs
- )
-
self.config = gc
if hypers:
self._llks = []
- for t in range(self.n_t):
- self._llks.append(
- shared(num.array([1.0]), name="geo_llk_%i" % t, borrow=True)
- )
+ self._llks.extend(
+ shared(num.array([1.0]), name="geo_llk_%i" % t, borrow=True)
+ for t in range(self.n_t)
+ )
def init_weights(self):
self.weights = []
@@ -211,7 +214,7 @@ def assemble_results(self, point, **kwargs):
Parameters
----------
- point : :func:`pymc3.Point`
+ point : :func:`pymc.Point`
Dictionary with model parameters
Returns
@@ -221,7 +224,7 @@ def assemble_results(self, point, **kwargs):
logger.debug("Assembling geodetic data ...")
- processed_synts = self.get_synthetics(point, outmode="stacked_arrays")
+ processed_synts = self.get_synthetics(point)
results = []
for i, data in enumerate(self.datasets):
@@ -247,7 +250,6 @@ def export(
force=False,
update=False,
):
-
from kite.scene import Scene, UserIOWarning
from pyrocko.guts import dump
@@ -263,7 +265,7 @@ def save_covs(datasets, cov_mat="pred_v"):
for dataset in datasets
}
- outname = os.path.join(results_path, "%s_C_%s" % ("geodetic", cov_mat))
+ outname = os.path.join(results_path, f"geodetic_C_{cov_mat}")
logger.info('"geodetic covariances" to: %s', outname)
num.savez(outname, **covs)
@@ -290,7 +292,7 @@ def get_filename(attr, ending="csv"):
for campaign in campaigns:
model_camp = gnss.GNSSCampaign(
stations=copy.deepcopy(campaign.stations),
- name="%s_model" % campaign.name,
+ name=f"{campaign.name}_model",
)
dataset_to_result = {}
@@ -317,7 +319,7 @@ def get_filename(attr, ending="csv"):
try:
scene_path = os.path.join(config.datadir, dataset.name)
logger.info(
- "Loading full resolution kite scene: %s" % scene_path
+ f"Loading full resolution kite scene: {scene_path}"
)
scene = Scene.load(scene_path)
except UserIOWarning:
@@ -328,17 +330,16 @@ def get_filename(attr, ending="csv"):
continue
for attr in ["processed_obs", "processed_syn", "processed_res"]:
-
filename = get_filename(attr, ending="csv")
displacements = getattr(result, attr)
dataset.export_to_csv(filename, displacements)
- logger.info("Stored CSV file to: %s" % filename)
+ logger.info(f"Stored CSV file to: {filename}")
filename = get_filename(attr, ending="yml")
vals = map_displacement_grid(displacements, scene)
scene.displacement = vals
scene.save(filename)
- logger.info("Stored kite scene to: %s" % filename)
+ logger.info(f"Stored kite scene to: {filename}")
# export stdz residuals
self.analyse_noise(point)
@@ -356,13 +357,13 @@ def init_hierarchicals(self, problem_config):
Ramp estimation in azimuth and range direction of a radar scene and/or
Rotation of GNSS stations around an Euler pole
"""
- hierarchicals = problem_config.hierarchicals
self._hierarchicalnames = []
+ hierarchicals = problem_config.hierarchicals
for number, corr in enumerate(
self.config.corrections_config.iter_corrections()
):
logger.info(
- "Evaluating config for %s corrections " "for datasets..." % corr.feature
+ f"Evaluating config for {corr.feature} corrections for datasets..."
)
if corr.enabled:
for data in self.datasets:
@@ -375,11 +376,8 @@ def init_hierarchicals(self, problem_config):
for hierarchical_name in hierarchical_names:
if not corr.enabled and hierarchical_name in hierarchicals:
-
raise ConfigInconsistentError(
- "%s %s disabled, but they are defined"
- " in the problem configuration"
- " (hierarchicals)!" % (corr.feature, data.name)
+ f"{corr.feature} {data.name} disabled, but they are defined in the problem configuration (hierarchicals)!"
)
if (
@@ -388,46 +386,34 @@ def init_hierarchicals(self, problem_config):
and data.name in corr.dataset_names
):
raise ConfigInconsistentError(
- "%s %s corrections enabled, but they are"
- " not defined in the problem configuration!"
- " (hierarchicals)" % (corr.feature, data.name)
+ f"{corr.feature} {data.name} corrections enabled, but they are not defined in the problem configuration! (hierarchicals)"
)
- param = hierarchicals[hierarchical_name]
if hierarchical_name not in self.hierarchicals:
+ param = hierarchicals[hierarchical_name]
if not num.array_equal(param.lower, param.upper):
kwargs = dict(
name=param.name,
shape=param.dimension,
lower=param.lower,
upper=param.upper,
- testval=param.testvalue,
+ initval=param.testvalue,
transform=None,
dtype=tconfig.floatX,
)
- try:
- self.hierarchicals[hierarchical_name] = Uniform(
- **kwargs
- )
- except TypeError:
- kwargs.pop("name")
- self.hierarchicals[
- hierarchical_name
- ] = Uniform.dist(**kwargs)
+ self.hierarchicals[
+ hierarchical_name
+ ] = init_uniform_random(kwargs)
self._hierarchicalnames.append(hierarchical_name)
else:
logger.info(
- "not solving for %s, got fixed at %s"
- % (
- param.name,
- utility.list2string(param.lower.flatten()),
- )
+ f"not solving for {param.name}, got fixed at {utility.list2string(param.lower.flatten())}"
)
self.hierarchicals[hierarchical_name] = param.lower
else:
- logger.info("No %s correction!" % corr.feature)
+ logger.info(f"No {corr.feature} correction!")
logger.info("Initialized %i hierarchical parameters." % len(self.hierarchicals))
@@ -460,11 +446,11 @@ def update_llks(self, point):
with numpy array-like items and variable name keys
"""
results = self.assemble_results(point)
- for l, result in enumerate(results):
- choli = self.datasets[l].covariance.chol_inverse
+ for i_l, result in enumerate(results):
+ choli = self.datasets[i_l].covariance.chol_inverse
tmp = choli.dot(result.processed_res)
_llk = num.asarray([num.dot(tmp, tmp)])
- self._llks[l].set_value(_llk)
+ self._llks[i_l].set_value(_llk)
def get_variance_reductions(self, point, results=None, weights=None):
"""
@@ -505,7 +491,6 @@ def get_variance_reductions(self, point, results=None, weights=None):
var_reds = OrderedDict()
for dataset, weight, result in zip(self.datasets, weights, results):
-
hp = get_hypervalue_from_point(
point, dataset, counter, hp_specific=hp_specific
)
@@ -579,6 +564,8 @@ class GeodeticSourceComposite(GeodeticComposite):
directory of the model project, where to find the data
sources : list
of :class:`pyrocko.gf.seismosizer.Source`
+ mapping : list
+ of dict of varnames and their sizes
events : list
of :class:`pyrocko.model.Event`
contains information of reference event, coordinates of reference
@@ -587,19 +574,24 @@ class GeodeticSourceComposite(GeodeticComposite):
if true initialise object for hyper parameter optimization
"""
- def __init__(self, gc, project_dir, sources, events, hypers=False):
-
+ def __init__(self, gc, project_dir, sources, mapping, events, hypers=False):
super(GeodeticSourceComposite, self).__init__(
gc, project_dir, events, hypers=hypers
)
- self.engine = LocalEngine(store_superdirs=[gc.gf_config.store_superdir])
+ if isinstance(gc.gf_config, bconfig.GeodeticGFConfig):
+ self.engine = LocalEngine(store_superdirs=[gc.gf_config.store_superdir])
+ elif isinstance(gc.gf_config, bconfig.BEMConfig):
+ from beat.bem import BEMEngine
+
+ self.engine = BEMEngine(gc.gf_config)
self.sources = sources
+ self.mapping = mapping
- def __getstate__(self):
- self.engine.close_cashed_stores()
- return self.__dict__.copy()
+ @property
+ def n_sources_total(self):
+ return len(self.sources)
def point2sources(self, point):
"""
@@ -609,64 +601,35 @@ def point2sources(self, point):
tpoint.update(self.fixed_rvs)
tpoint = utility.adjust_point_units(tpoint)
- # remove hyperparameters from point
- hps = self.config.get_hypernames()
-
- for hyper in hps:
- if hyper in tpoint:
- tpoint.pop(hyper)
-
- source_params = list(self.sources[0].keys())
- for param in list(tpoint.keys()):
- if param not in source_params:
- tpoint.pop(param)
-
- source_points = utility.split_point(tpoint)
+ source_points = utility.split_point(
+ tpoint,
+ mapping=self.mapping,
+ weed_params=True,
+ )
for i, source in enumerate(self.sources):
utility.update_source(source, **source_points[i])
# reset source time may result in store error otherwise
source.time = 0.0
- def get_pyrocko_events(self, point=None):
- """
- Transform sources to pyrocko events.
-
- Returns
- -------
- events : list
- of :class:`pyrocko.model.Event`
- """
-
- if point is not None:
- self.point2sources(point)
-
- events = []
- target = self.targets[0]
- store = self.engine.get_store(target.store_id)
- for source in self.sources:
- events.append(source.pyrocko_event(store=store, target=target))
-
- return events
-
def get_formula(self, input_rvs, fixed_rvs, hyperparams, problem_config):
"""
Get geodetic likelihood formula for the model built. Has to be called
within a with model context.
- Part of the pymc3 model.
+ Part of the pymc model.
Parameters
----------
input_rvs : dict
- of :class:`pymc3.distribution.Distribution`
+ of :class:`pymc.distribution.Distribution`
fixed_rvs : dict
of :class:`numpy.array`
hyperparams : dict
- of :class:`pymc3.distribution.Distribution`
+ of :class:`pymc.distribution.Distribution`
problem_config : :class:`config.ProblemConfig`
Returns
-------
- posterior_llk : :class:`theano.tensor.Tensor`
+ posterior_llk : :class:`pytensor.tensor.Tensor`
"""
hp_specific = self.config.dataset_specific_residual_noise_estimation
tpoint = problem_config.get_test_point()
@@ -691,7 +654,6 @@ def get_formula(self, input_rvs, fixed_rvs, hyperparams, problem_config):
tt.cast((self.sdata - los_disp) * self.sodws, tconfig.floatX)
)
- self.init_hierarchicals(problem_config)
self.analyse_noise(tpoint)
self.init_weights()
if self.config.corrections_config.has_enabled_corrections:
@@ -705,30 +667,55 @@ def get_formula(self, input_rvs, fixed_rvs, hyperparams, problem_config):
llk = Deterministic(self._like_name, logpts)
return llk.sum()
+ def get_pyrocko_events(self, point=None):
+ """
+ Transform sources to pyrocko events.
-class GeodeticGeometryComposite(GeodeticSourceComposite):
- def __init__(self, gc, project_dir, sources, events, hypers=False):
+ Returns
+ -------
+ events : list
+ of :class:`pyrocko.model.Event`
+ """
+ if point is not None:
+ self.point2sources(point)
+
+ target = self.targets[0]
+ store = self.engine.get_store(target.store_id)
+ return [
+ source.pyrocko_event(store=store, target=target) for source in self.sources
+ ]
+
+
+class GeodeticGeometryComposite(GeodeticSourceComposite):
+ def __init__(self, gc, project_dir, sources, mapping, events, hypers=False):
super(GeodeticGeometryComposite, self).__init__(
- gc, project_dir, sources, events, hypers=hypers
+ gc, project_dir, sources, mapping, events, hypers=hypers
)
+ logger.info("Initialising geometry geodetic composite ...")
if not hypers:
# synthetics generation
logger.debug("Initialising synthetics functions ... \n")
- self.get_synths = theanof.GeoSynthesizer(
- engine=self.engine, sources=self.sources, targets=self.targets
+ self.get_synths = pytensorf.GeoSynthesizer(
+ engine=self.engine,
+ sources=self.sources,
+ targets=self.targets,
+ mapping=mapping,
)
- def get_synthetics(self, point, **kwargs):
+ def __getstate__(self):
+ self.engine.close_cashed_stores()
+ return self.__dict__.copy()
+
+ def get_synthetics(self, point):
"""
Get synthetics for given point in solution space.
Parameters
----------
- point : :func:`pymc3.Point`
+ point : :func:`pymc.Point`
Dictionary with model parameters
- kwargs especially to change output of the forward model
Returns
-------
@@ -737,7 +724,10 @@ def get_synthetics(self, point, **kwargs):
self.point2sources(point)
displacements = heart.geo_synthetics(
- engine=self.engine, targets=self.targets, sources=self.sources, **kwargs
+ engine=self.engine,
+ targets=self.targets,
+ sources=self.sources,
+ outmode="stacked_arrays",
)
synths = []
@@ -786,13 +776,14 @@ def update_weights(self, point, n_jobs=1, plot=False):
for i, data in enumerate(self.datasets):
crust_targets = heart.init_geodetic_targets(
datasets=[data],
+ event=self.event,
earth_model_name=gc.gf_config.earth_model_name,
interpolation=gc.interpolation,
crust_inds=crust_inds,
sample_rate=gc.gf_config.sample_rate,
)
- logger.debug("Track %s" % data.name)
+ logger.debug(f"Track {data.name}")
cov_pv = cov.geodetic_cov_velocity_models(
engine=self.engine,
sources=self.sources,
@@ -805,48 +796,44 @@ def update_weights(self, point, n_jobs=1, plot=False):
cov_pv = utility.ensure_cov_psd(cov_pv)
data.covariance.pred_v = cov_pv
- choli = data.covariance.chol_inverse
-
- self.weights[i].set_value(choli)
- data.covariance.update_slog_pdet()
else:
logger.info(
"Not updating geodetic velocity model-covariances because "
"number of model variations is too low! < %i" % thresh
)
+ # update shared weights from covariance matrices
+ for i, data in enumerate(self.datasets):
+ choli = data.covariance.chol_inverse
-class GeodeticInterseismicComposite(GeodeticSourceComposite):
- def __init__(self, gc, project_dir, sources, events, hypers=False):
+ self.weights[i].set_value(choli)
+ data.covariance.update_slog_pdet()
- super(GeodeticInterseismicComposite, self).__init__(
- gc, project_dir, sources, events, hypers=hypers
- )
- for source in sources:
- if not isinstance(source, RectangularSource):
- raise TypeError("Sources have to be RectangularSources!")
+class GeodeticBEMComposite(GeodeticSourceComposite):
+ def __init__(self, gc, project_dir, sources, mapping, events, hypers=False):
+ super(GeodeticBEMComposite, self).__init__(
+ gc, project_dir, sources, mapping, events, hypers=hypers
+ )
+ logger.info("Initialising BEM geodetic composite ...")
if not hypers:
- self._lats = self.Bij.l2a([data.lats for data in self.datasets])
- self._lons = self.Bij.l2a([data.lons for data in self.datasets])
-
- self.get_synths = theanof.GeoInterseismicSynthesizer(
- lats=self._lats,
- lons=self._lons,
+ # synthetics generation
+ logger.debug("Initialising synthetics functions ... \n")
+ self.get_synths = pytensorf.GeoSynthesizer(
engine=self.engine,
+ sources=self.sources,
targets=self.targets,
- sources=sources,
- reference=self.event,
+ mapping=mapping,
)
- def get_synthetics(self, point, **kwargs):
+ def get_synthetics(self, point):
"""
Get synthetics for given point in solution space.
Parameters
----------
- point : :func:`pymc3.Point`
+ point : :func:`pymc.Point`
Dictionary with model parameters
kwargs especially to change output of the forward model
@@ -854,34 +841,97 @@ def get_synthetics(self, point, **kwargs):
-------
list with :class:`numpy.ndarray` synthetics for each target
"""
- tpoint = copy.deepcopy(point)
- tpoint.update(self.fixed_rvs)
- spoint, bpoint = seperate_point(tpoint)
+ self.point2sources(point)
- self.point2sources(spoint)
+ displacements = heart.geo_synthetics(
+ engine=self.engine,
+ targets=self.targets,
+ sources=self.sources,
+ outmode="arrays",
+ )
synths = []
- for target, data in zip(self.targets, self.datasets):
- disp = geo_backslip_synthetics(
- engine=self.engine,
- sources=self.sources,
- targets=[target],
- lons=target.lons,
- lats=target.lats,
- reference=self.event,
- **bpoint
- )
- synths.append((disp * data.los_vector).sum(axis=1))
+ for disp, data in zip(displacements, self.datasets):
+ los_d = (disp * data.los_vector).sum(axis=1)
+ synths.append(los_d)
+
+ if self.config.corrections_config.has_enabled_corrections:
+ synths = self.apply_corrections(synths, point=point, operation="+")
return synths
def update_weights(self, point, n_jobs=1, plot=False):
+ """
+ Updates weighting matrixes (in place) with respect to the point in the
+ solution space.
+
+ Parameters
+ ----------
+ point : dict
+ with numpy array-like items and variable name keys
+ """
+ gc = self.config
if not self.weights:
self.init_weights()
- logger.warning("Not implemented yet!")
- raise NotImplementedError("Not implemented yet!")
+ self.point2sources(point)
+
+ # update data covariances in case model dependent non-toeplitz
+ if self.config.noise_estimator.structure == "non-toeplitz":
+ logger.info("Updating data-covariances ...")
+ self.analyse_noise(point)
+
+ crust_inds = range(*gc.gf_config.n_variations)
+ thresh = 5
+ if len(crust_inds) > thresh:
+ raise NotImplementedError(
+ "Needs updating for this composite to vary elastic parameters."
+ )
+
+ logger.info("Updating geodetic velocity model-covariances ...")
+ if self.config.noise_estimator.structure == "non-toeplitz":
+ logger.warning(
+ "Non-toeplitz estimation in combination with model "
+ "prediction covariances is still EXPERIMENTAL and results"
+ " should be interpreted with care!!"
+ )
+
+ for i, data in enumerate(self.datasets):
+ crust_targets = heart.init_geodetic_targets(
+ datasets=[data],
+ event=self.event,
+ earth_model_name=gc.gf_config.earth_model_name,
+ interpolation=gc.interpolation,
+ crust_inds=crust_inds,
+ sample_rate=gc.gf_config.sample_rate,
+ )
+
+ logger.debug("Track %s" % data.name)
+ cov_pv = cov.geodetic_cov_velocity_models(
+ engine=self.engine,
+ sources=self.sources,
+ targets=crust_targets,
+ dataset=data,
+ plot=plot,
+ event=self.event,
+ n_jobs=1,
+ )
+
+ cov_pv = utility.ensure_cov_psd(cov_pv)
+ data.covariance.pred_v = cov_pv
+ else:
+ logger.info(
+ "Not updating geodetic velocity model-covariances because "
+ "number of model variations is too low! < %i" % thresh
+ )
+
+ # update shared weights from covariance matrices
+ for i, data in enumerate(self.datasets):
+ choli = data.covariance.chol_inverse
+
+ self.weights[i].set_value(choli)
+ data.covariance.update_slog_pdet()
class GeodeticDistributerComposite(GeodeticComposite):
@@ -891,7 +941,6 @@ class GeodeticDistributerComposite(GeodeticComposite):
"""
def __init__(self, gc, project_dir, events, hypers=False):
-
super(GeodeticDistributerComposite, self).__init__(
gc, project_dir, events, hypers=hypers
)
@@ -917,7 +966,7 @@ def load_gfs(self, crust_inds=None, make_shared=True):
crust_inds : list
of int to indexes of Green's Functions
make_shared : bool
- if True transforms gfs to :class:`theano.shared` variables
+ if True transforms gfs to :class:`pytensor.shared` variables
"""
if crust_inds is None:
@@ -994,16 +1043,16 @@ def get_formula(self, input_rvs, fixed_rvs, hyperparams, problem_config):
Parameters
----------
input_rvs : list
- of :class:`pymc3.distribution.Distribution`
+ of :class:`pymc.distribution.Distribution`
hyperparams : dict
- of :class:`pymc3.distribution.Distribution`
+ of :class:`pymc.distribution.Distribution`
Returns
-------
- llk : :class:`theano.tensor.Tensor`
+ llk : :class:`pytensor.tensor.Tensor`
log-likelihood for the distributed slip
"""
- logger.info("Loading %s Green's Functions" % self.name)
+ logger.info(f"Loading {self.name} Green's Functions")
self.load_gfs(
crust_inds=[self.config.gf_config.reference_model_idx], make_shared=False
)
@@ -1032,7 +1081,6 @@ def get_formula(self, input_rvs, fixed_rvs, hyperparams, problem_config):
tt.cast((self.sdata - mu) * self.sodws, tconfig.floatX)
)
- self.init_hierarchicals(problem_config)
if self.config.corrections_config.has_enabled_corrections:
residuals = self.apply_corrections(residuals)
@@ -1041,16 +1089,15 @@ def get_formula(self, input_rvs, fixed_rvs, hyperparams, problem_config):
)
llk = Deterministic(self._like_name, logpts)
-
return llk.sum()
- def get_synthetics(self, point, outmode="data"):
+ def get_synthetics(self, point):
"""
Get synthetics for given point in solution space.
Parameters
----------
- point : :func:`pymc3.Point`
+ point : :func:`pymc.Point`
Dictionary with model parameters
kwargs especially to change output of the forward model
@@ -1149,12 +1196,15 @@ def update_weights(self, point, n_jobs=1, plot=False):
cov_pv = utility.ensure_cov_psd(cov_pv)
data.covariance.pred_v = cov_pv
- choli = data.covariance.chol_inverse
-
- self.weights[i].set_value(choli)
- data.covariance.update_slog_pdet()
else:
logger.info(
"Not updating geodetic velocity model-covariances because "
"number of model variations is too low! < %i" % thresh
)
+
+ # update shared weights from covariance matrices
+ for i, data in enumerate(self.datasets):
+ choli = data.covariance.chol_inverse
+
+ self.weights[i].set_value(choli)
+ data.covariance.update_slog_pdet()
diff --git a/beat/models/laplacian.py b/beat/models/laplacian.py
index ec44fe51..7b9af406 100644
--- a/beat/models/laplacian.py
+++ b/beat/models/laplacian.py
@@ -2,15 +2,15 @@
from logging import getLogger
import numpy as num
-from pymc3 import Deterministic
-from theano import config as tconfig
-from theano import shared
-from theano import tensor as tt
+from pymc import Deterministic
+from pytensor import config as tconfig
+from pytensor import shared
+from pytensor import tensor as tt
from beat import config as bconfig
from beat.heart import log_determinant
from beat.models.base import Composite, FaultGeometryNotFoundError
-from beat.utility import load_objects, distances
+from beat.utility import distances, load_objects
logger = getLogger("ffi.laplacian")
@@ -26,7 +26,6 @@
class LaplacianDistributerComposite(Composite):
def __init__(self, config, project_dir, events, hypers):
-
super(LaplacianDistributerComposite, self).__init__(events)
self.config = config
@@ -100,22 +99,22 @@ def get_formula(self, input_rvs, fixed_rvs, hyperparams, problem_config):
"""
Get smoothing likelihood formula for the model built. Has to be called
within a with model context.
- Part of the pymc3 model.
+ Part of the pymc model.
Parameters
----------
input_rvs : dict
- of :class:`pymc3.distribution.Distribution`
+ of :class:`pymc.distribution.Distribution`
fixed_rvs : dict
of :class:`numpy.array` here only dummy
hyperparams : dict
- of :class:`pymc3.distribution.Distribution`
+ of :class:`pymc.distribution.Distribution`
problem_config : :class:`config.ProblemConfig`
here it is not used
Returns
-------
- posterior_llk : :class:`theano.tensor.Tensor`
+ posterior_llk : :class:`pytensor.tensor.Tensor`
"""
logger.info("Initialising Laplacian smoothing operator ...")
@@ -127,12 +126,12 @@ def get_formula(self, input_rvs, fixed_rvs, hyperparams, problem_config):
self.input_rvs.update(fixed_rvs)
logpts = tt.zeros((self.n_t), tconfig.floatX)
- for l, var in enumerate(self.slip_varnames):
+ for i_l, var in enumerate(self.slip_varnames):
Ls = self.shared_smoothing_op.dot(input_rvs[var])
exponent = Ls.T.dot(Ls)
logpts = tt.set_subtensor(
- logpts[l : l + 1],
+ logpts[i_l : i_l + 1],
self._eval_prior(hyperparams[hp_name], exponent=exponent),
)
@@ -149,10 +148,10 @@ def update_llks(self, point):
point : dict
with numpy array-like items and variable name keys
"""
- for l, varname in enumerate(self.slip_varnames):
+ for i_l, varname in enumerate(self.slip_varnames):
Ls = self.smoothing_op.dot(point[varname])
_llk = num.asarray([Ls.T.dot(Ls)])
- self._llks[l].set_value(_llk)
+ self._llks[i_l].set_value(_llk)
def get_hyper_formula(self, hyperparams):
"""
@@ -278,6 +277,8 @@ def get_smoothing_operator_correlated(patches_coords, correlation_function="gaus
"""
inter_patch_distances = distances(patches_coords, patches_coords)
+ # remove invalid diag at distance zero
+ num.fill_diagonal(inter_patch_distances, num.ones(inter_patch_distances.shape[0]))
if correlation_function == "gaussian":
a = 1 / num.power(inter_patch_distances, 2)
@@ -290,7 +291,8 @@ def get_smoothing_operator_correlated(patches_coords, correlation_function="gaus
'"nearest_neighbor" correlation function!'
)
- num.fill_diagonal(a, num.zeros(a.shape[0])) # remove invalid diag
+ # fill diagonal
+ num.fill_diagonal(a, num.zeros(a.shape[0]))
norm_distances = a.sum(0)
num.fill_diagonal(a, -norm_distances)
return a
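The added fill_diagonal call guards the correlated smoothing operator against division by zero: each patch has zero distance to itself, so 1/d**2 would blow up before the diagonal is reset. A standalone numpy sketch of the "gaussian" branch with toy patch coordinates (not a BEAT fault geometry):

    import numpy as num

    coords = num.array([[0.0, 0.0], [1.0, 0.0], [0.0, 2.0]])   # toy patch centres
    d = num.linalg.norm(coords[:, None, :] - coords[None, :, :], axis=-1)
    num.fill_diagonal(d, num.ones(d.shape[0]))    # avoid 1/0 on the diagonal
    a = 1.0 / num.power(d, 2)                     # "gaussian" correlation weights
    num.fill_diagonal(a, num.zeros(a.shape[0]))   # reset diagonal before normalisation
    norm_distances = a.sum(0)
    num.fill_diagonal(a, -norm_distances)         # Laplacian-style diagonal
    print(a)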
diff --git a/beat/models/polarity.py b/beat/models/polarity.py
index 564fc4d8..146d30ec 100644
--- a/beat/models/polarity.py
+++ b/beat/models/polarity.py
@@ -3,12 +3,12 @@
from collections import OrderedDict
from logging import getLogger
-from pymc3 import Deterministic
+from pymc import Deterministic
from pyrocko.gf import LocalEngine
from pyrocko.guts import dump
from pyrocko.model import load_stations
-from theano import shared
-from theano.tensor import concatenate
+from pytensor import shared
+from pytensor.tensor import concatenate
from beat import config as bconfig
from beat.heart import (
@@ -17,11 +17,10 @@
ResultPoint,
init_polarity_targets,
pol_synthetics,
- results_for_export,
)
from beat.models.base import Composite
from beat.models.distributions import polarity_llk
-from beat.theanof import PolaritySynthesizer
+from beat.pytensorf import PolaritySynthesizer
from beat.utility import adjust_point_units, split_point, unique_list, update_source
logger = getLogger("polarity")
@@ -31,8 +30,7 @@
class PolarityComposite(Composite):
- def __init__(self, polc, project_dir, sources, events, hypers=False):
-
+ def __init__(self, polc, project_dir, sources, mapping, events, hypers=False):
super(PolarityComposite, self).__init__(events)
logger.debug("Setting up polarity structure ...\n")
@@ -42,6 +40,7 @@ def __init__(self, polc, project_dir, sources, events, hypers=False):
self._targets = None
self.synthesizers = {}
self.sources = sources
+ self.mapping = mapping
self.config = polc
self.gamma = shared(0.2, name="gamma", borrow=True)
self.fixed_rvs = {}
@@ -88,6 +87,10 @@ def __init__(self, polc, project_dir, sources, events, hypers=False):
)
self.wavemaps.append(pmap)
+ @property
+ def n_sources_total(self):
+ return len(self.sources)
+
@property
def is_location_fixed(self):
"""
@@ -99,7 +102,6 @@ def is_location_fixed(self):
return False
def get_formula(self, input_rvs, fixed_rvs, hyperparams, problem_config):
-
self.input_rvs = input_rvs
self.fixed_rvs = fixed_rvs
@@ -113,7 +115,6 @@ def get_formula(self, input_rvs, fixed_rvs, hyperparams, problem_config):
logpts = []
for i, pmap in enumerate(self.wavemaps):
-
self.synthesizers[i] = PolaritySynthesizer(
self.engine,
self.sources[pmap.config.event_idx],
@@ -156,17 +157,6 @@ def point2sources(self, point):
tpoint.update(self.fixed_rvs)
tpoint = adjust_point_units(tpoint)
- hps = self.config.get_hypernames()
- for hyper in hps:
- if hyper in tpoint:
- tpoint.pop(hyper)
-
- source_params = list(self.sources[0].keys())
-
- for param in list(tpoint.keys()):
- if param not in source_params:
- tpoint.pop(param)
-
if "time" in tpoint:
if self.nevents == 1:
tpoint["time"] += self.event.time # single event
@@ -174,7 +164,11 @@ def point2sources(self, point):
for i, event in enumerate(self.events): # multi event
tpoint["time"][i] += event.time
- source_points = split_point(tpoint)
+ source_points = split_point(
+ tpoint,
+ mapping=self.mapping,
+ weed_params=True,
+ )
for i, source in enumerate(self.sources):
update_source(source, **source_points[i])
@@ -225,7 +219,6 @@ def export(
force=False,
update=False,
):
-
results = self.assemble_results(point)
for i, result in enumerate(results):
# TODO need human readable format like e.g.: .csv
@@ -234,7 +227,6 @@ def export(
dump(result, filename=output)
def assemble_results(self, point, order="list"):
-
if point is None:
raise ValueError("A point has to be provided!")
@@ -267,7 +259,6 @@ def assemble_results(self, point, order="list"):
return results
def get_synthetics(self, point, **kwargs):
-
order = kwargs.pop("order", "list")
self.point2sources(point)
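point2sources now delegates the removal of hyper- and non-source parameters to split_point via mapping and weed_params instead of popping keys by hand. The mapping carries per-source-type variable sizes; its exact structure lives in beat.config and is not shown in this diff. A hypothetical, simplified illustration of splitting a point dict across sources (not the real utility.split_point):

    import numpy as num

    point = {
        "depth": num.array([2.0, 4.0, 6.0]),
        "magnitude": num.array([5.5, 6.0, 6.2]),
    }
    n_sources = 3
    source_points = [
        {name: values[i] for name, values in point.items()} for i in range(n_sources)
    ]
    print(source_points[1])   # {'depth': 4.0, 'magnitude': 6.0}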
diff --git a/beat/models/problems.py b/beat/models/problems.py
index 0c9899ae..a6fad3b4 100644
--- a/beat/models/problems.py
+++ b/beat/models/problems.py
@@ -4,19 +4,21 @@
from logging import getLogger
import numpy as num
-import theano.tensor as tt
-from pymc3 import Deterministic, Model, Potential, Uniform
+import pytensor.tensor as tt
+from pymc import Deterministic, Model, Potential, draw
from pyrocko import util
from pyrocko.model import get_effective_latlon
-from theano import config as tconfig
+from pytensor import config as tconfig
from beat import config as bconfig
from beat.backend import ListArrayOrdering, ListToArrayBijection
from beat.models import geodetic, laplacian, polarity, seismic
+from beat.models.base import init_uniform_random
from beat.utility import list2string, transform_sources, weed_input_rvs
-# disable theano rounding warning
-tconfig.warn.round = False
+# compute test values (drop into pdb on error); disable pytensor rounding warning
+tconfig.compute_test_value = "pdb"
+tconfig.warn__round = False
km = 1000.0
@@ -27,7 +29,6 @@
class InconsistentNumberHyperparametersError(Exception):
-
context = (
"Configuration file has to be updated!"
+ " Hyperparameters have to be re-estimated. \n"
@@ -48,6 +49,9 @@ def __str__(self):
"geodetic": geodetic.GeodeticGeometryComposite,
}
+bem_composite_catalog = {
+ "geodetic": geodetic.GeodeticBEMComposite,
+}
distributer_composite_catalog = {
"seismic": seismic.SeismicDistributerComposite,
@@ -56,9 +60,6 @@ def __str__(self):
}
-interseismic_composite_catalog = {"geodetic": geodetic.GeodeticInterseismicComposite}
-
-
class Problem(object):
"""
Overarching class for the optimization problems to be solved.
@@ -74,7 +75,6 @@ class Problem(object):
_hierarchicalnames = None
def __init__(self, config, hypers=False):
-
self.model = None
self._like_name = "like"
@@ -156,7 +156,7 @@ def init_sampler(self, hypers=False):
backend=sc.backend,
)
t2 = time.time()
- logger.info("Compilation time: %f" % (t2 - t1))
+ logger.info("Compilation time: %f \n" % (t2 - t1))
elif sc.name == "SMC":
logger.info(
@@ -176,12 +176,12 @@ def init_sampler(self, hypers=False):
n_chains=sc.parameters.n_chains,
tune_interval=sc.parameters.tune_interval,
coef_variation=sc.parameters.coef_variation,
- proposal_dist=sc.parameters.proposal_dist,
+ proposal_name=sc.parameters.proposal_dist,
likelihood_name=self._like_name,
backend=sc.backend,
)
t2 = time.time()
- logger.info("Compilation time: %f" % (t2 - t1))
+ logger.info("Compilation time: %f \n" % (t2 - t1))
elif sc.name == "PT":
logger.info(
@@ -211,7 +211,7 @@ def init_sampler(self, hypers=False):
def built_model(self):
"""
- Initialise :class:`pymc3.Model` depending on problem composites,
+ Initialise :class:`pymc.Model` depending on problem composites,
geodetic and/or seismic data are included. Composites also determine
the problem to be solved.
"""
@@ -221,12 +221,11 @@ def built_model(self):
pc = self.config.problem_config
with Model() as self.model:
-
- self.rvs, self.fixed_params = pc.get_random_variables()
-
+ self.init_random_variables()
self.init_hyperparams()
+ self.init_hierarchicals()
- total_llk = tt.zeros((1), tconfig.floatX)
+ total_llk = tt.zeros((1, 1), tconfig.floatX)
for datatype, composite in self.composites.items():
if datatype in bconfig.modes_catalog[pc.mode].keys():
@@ -244,10 +243,8 @@ def built_model(self):
)
# deterministic RV to write out llks to file
- like = Deterministic("tmp", total_llk)
-
- # will overwrite deterministic name ...
- llk = Potential(self._like_name, like)
+ like = Potential("dummy", total_llk.sum())
+ llk = Deterministic(self._like_name, like) # noqa: F841
logger.info("Model building was successful! \n")
def plant_lijection(self):
@@ -256,14 +253,14 @@ def plant_lijection(self):
"""
if self.model is not None:
lordering = ListArrayOrdering(self.model.unobserved_RVs, intype="tensor")
- lpoint = [var.tag.test_value for var in self.model.unobserved_RVs]
+ lpoint = [var.get_test_value() for var in self.model.unobserved_RVs]
self.model.lijection = ListToArrayBijection(lordering, lpoint)
else:
raise AttributeError("Model needs to be built!")
def built_hyper_model(self):
"""
- Initialise :class:`pymc3.Model` depending on configuration file,
+ Initialise :class:`pymc.Model` depending on configuration file,
geodetic and/or seismic data are included. Estimates initial parameter
bounds for hyperparameters.
"""
@@ -282,10 +279,9 @@ def built_hyper_model(self):
point[param.name] = param.testvalue
with Model() as self.model:
-
self.init_hyperparams()
- total_llk = tt.zeros((1), tconfig.floatX)
+ total_llk = tt.zeros((1, 1), tconfig.floatX)
for composite in self.composites.values():
if hasattr(composite, "analyse_noise"):
@@ -296,8 +292,8 @@ def built_hyper_model(self):
total_llk += composite.get_hyper_formula(self.hyperparams)
- like = Deterministic("tmp", total_llk)
- llk = Potential(self._like_name, like)
+ like = Potential("dummy", total_llk.sum())
+ llk = Deterministic(self._like_name, like) # noqa: F841
logger.info("Hyper model building was successful!")
def get_random_point(self, include=["priors", "hierarchicals", "hypers"]):
@@ -310,19 +306,19 @@ def get_random_point(self, include=["priors", "hierarchicals", "hypers"]):
if "hierarchicals" in include:
for name, param in self.hierarchicals.items():
if not isinstance(param, num.ndarray):
- point[name] = param.random()
+ point[name] = draw(param)
if "priors" in include:
for param in pc.priors.values():
- shape = pc.get_parameter_shape(param)
- point[param.name] = param.random(shape)
+ size = pc.get_parameter_size(param)
+ point[param.name] = param.random(size)
if "hypers" in include:
if len(self.hyperparams) == 0:
self.init_hyperparams()
hps = {
- hp_name: param.random()
+ hp_name: draw(param)
for hp_name, param in self.hyperparams.items()
if not isinstance(param, num.ndarray)
}
@@ -372,16 +368,29 @@ def hierarchicalnames(self):
self.init_hierarchicals()
return self._hierarchicalnames
+ def init_random_variables(self):
+ """
+ Evaluate problem setup and initialize random variables and
+ fixed variables dictionaries.
+ """
+ (
+ rvs_kwargs,
+ self.fixed_params,
+ ) = self.config.problem_config.get_random_variables()
+
+ self.rvs = {}
+ for varname, kwargs in rvs_kwargs.items():
+ self.rvs[varname] = init_uniform_random(kwargs)
+
def init_hyperparams(self):
"""
- Evaluate problem setup and return hyperparameter dictionary.
+ Evaluate problem setup and initialize hyperparameter dictionary.
"""
pc = self.config.problem_config
hyperparameters = copy.deepcopy(pc.hyperparameters)
hyperparams = {}
n_hyp = 0
- modelinit = True
self._hypernames = []
for datatype, composite in self.composites.items():
hypernames = composite.get_hypernames()
@@ -404,18 +413,12 @@ def init_hyperparams(self):
shape=dimension,
lower=num.repeat(hyperpar.lower, ndata),
upper=num.repeat(hyperpar.upper, ndata),
- testval=num.repeat(hyperpar.testvalue, ndata),
+ initval=num.repeat(hyperpar.testvalue, ndata),
dtype=tconfig.floatX,
transform=None,
)
- try:
- hyperparams[hp_name] = Uniform(**kwargs)
-
- except TypeError:
- kwargs.pop("name")
- hyperparams[hp_name] = Uniform.dist(**kwargs)
- modelinit = False
+ hyperparams[hp_name] = init_uniform_random(kwargs)
n_hyp += dimension
self._hypernames.append(hyperpar.name)
@@ -437,8 +440,7 @@ def init_hyperparams(self):
" covered by datasets/datatypes."
)
- if modelinit:
- logger.info("Optimization for %i hyperparameters in total!", n_hyp)
+ logger.info("Initialized %i hyperparameters in total!", n_hyp)
self.hyperparams = hyperparams
@@ -469,7 +471,7 @@ def get_variance_reductions(self, point):
Parameters
----------
- point : :func:`pymc3.Point`
+ point : :func:`pymc.Point`
Dictionary with model parameters, for which the VRs are calculated
"""
vrs = {}
@@ -496,7 +498,7 @@ def point2sources(self, point):
Parameters
----------
- point : :func:`pymc3.Point`
+ point : :func:`pymc.Point`
Dictionary with model parameters, for which the sources are
updated
"""
@@ -509,7 +511,7 @@ def get_pyrocko_events(self, point=None):
Parameters
----------
- point : :func:`pymc3.Point`
+ point : :func:`pymc.Point`
Dictionary with model parameters, for which the events are
returned
@@ -540,7 +542,7 @@ def update_weights(self, point, n_jobs=1, plot=False):
Parameters
----------
- point : :func:`pymc3.Point`
+ point : :func:`pymc.Point`
Dictionary with model parameters, for which the covariance matrixes
with respect to velocity model uncertainties are calculated
n_jobs : int
@@ -569,7 +571,7 @@ def get_synthetics(self, point, **kwargs):
Parameters
----------
- point : :func:`pymc3.Point`
+ point : :func:`pymc.Point`
Dictionary with model parameters
kwargs especially to change output of seismic forward model
outmode = 'traces'/ 'array' / 'data'
@@ -624,34 +626,44 @@ class SourceOptimizer(Problem):
"""
def __init__(self, config, hypers=False):
-
super(SourceOptimizer, self).__init__(config, hypers)
pc = config.problem_config
+ n_sources_total = sum(pc.n_sources)
- if self.nevents != pc.n_sources and self.nevents != 1:
+ if self.nevents != n_sources_total and self.nevents != 1:
raise ValueError(
"Number of events and sources have to be equal or only one "
"event has to be used! Number if events %i and number of "
- "sources: %i!" % (self.nevents, pc.n_sources)
+ "sources: %i!" % (self.nevents, n_sources_total)
)
# Init sources
self.sources = []
- for i in range(pc.n_sources):
- if self.nevents > 1:
- event = self.events[i]
- else:
- event = self.event
+ running_idx = 0
+ for source_type, n_source in zip(pc.source_types, pc.n_sources):
+ for _ in range(n_source):
+ if self.nevents > 1:
+ event = self.events[running_idx]
+ else:
+ event = self.event
- source = bconfig.source_catalog[pc.source_type].from_pyrocko_event(event)
- source.stf = bconfig.stf_catalog[pc.stf_type](duration=event.duration)
+ logger.info(
+ "Using %s for %i sources for event %s",
+ source_type,
+ n_source,
+ event.__str__(),
+ )
- # hardcoded inversion for hypocentral time
- if source.stf is not None:
- source.stf.anchor = -1.0
+ source = bconfig.source_catalog[source_type].from_pyrocko_event(event)
+ source.stf = bconfig.stf_catalog[pc.stf_type](duration=event.duration)
- self.sources.append(source)
+ # hardcoded inversion for hypocentral time
+ if source.stf is not None:
+ source.stf.anchor = -1.0
+
+ running_idx += 1
+ self.sources.append(source)
class GeometryOptimizer(SourceOptimizer):
@@ -671,14 +683,19 @@ def __init__(self, config, hypers=False):
super(GeometryOptimizer, self).__init__(config, hypers)
pc = config.problem_config
+ if pc.mode == "geometry":
+ composite_catalog = geometry_composite_catalog
+ elif pc.mode == "bem":
+ composite_catalog = bem_composite_catalog
dsources = transform_sources(self.sources, pc.datatypes, pc.decimation_factors)
-
+ mappings = pc.get_variables_mapping()
for datatype in pc.datatypes:
- self.composites[datatype] = geometry_composite_catalog[datatype](
+ self.composites[datatype] = composite_catalog[datatype](
config[datatype + "_config"],
config.project_dir,
dsources[datatype],
+ mappings[datatype],
self.events,
hypers,
)
@@ -690,48 +707,6 @@ def __init__(self, config, hypers=False):
self.point2sources(tpoint)
-class InterseismicOptimizer(SourceOptimizer):
- """
- Uses the backslip-model in combination with the blockmodel to formulate an
- interseismic model.
-
- Parameters
- ----------
- config : :class:'config.BEATconfig'
- Contains all the information about the model setup and optimization
- boundaries, as well as the sampler parameters.
- """
-
- def __init__(self, config, hypers=False):
- logger.info("... Initialising Interseismic Optimizer ... \n")
-
- super(InterseismicOptimizer, self).__init__(config, hypers)
-
- pc = config.problem_config
-
- if pc.source_type == "RectangularSource":
- dsources = transform_sources(self.sources, pc.datatypes)
- else:
- raise TypeError(
- "Interseismic Optimizer has to be used with" " RectangularSources!"
- )
-
- for datatype in pc.datatypes:
- self.composites[datatype] = interseismic_composite_catalog[datatype](
- config[datatype + "_config"],
- config.project_dir,
- dsources[datatype],
- self.events,
- hypers,
- )
-
- self.config = config
-
- # updating source objects with fixed values
- point = self.get_random_point()
- self.point2sources(point)
-
-
class DistributionOptimizer(Problem):
"""
Defines the model setup to solve the linear slip-distribution and
@@ -845,7 +820,6 @@ def lsq_solution(self, point, plot=False):
ds.extend(displacements)
elif datatype == "seismic":
-
targets_gfs = [[] for i in range(composite.n_t)]
for pidx in range(npatches):
Gseis, dseis = composite.get_synthetics(
@@ -899,11 +873,10 @@ def lsq_solution(self, point, plot=False):
return point
-problem_modes = list(bconfig.modes_catalog.keys())
problem_catalog = {
- problem_modes[0]: GeometryOptimizer,
- problem_modes[1]: DistributionOptimizer,
- problem_modes[2]: InterseismicOptimizer,
+ bconfig.geometry_mode_str: GeometryOptimizer,
+ bconfig.ffi_mode_str: DistributionOptimizer,
+ bconfig.bem_mode_str: GeometryOptimizer,
}
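Two pymc v5 idioms recur in the problems.py hunks: the likelihood now enters the model as a Potential and is echoed to the trace with a Deterministic, and free-standing random variables are sampled with pymc.draw() instead of the removed .random() method. A toy sketch of both (placeholder values, not BEAT's configuration):

    import pymc as pm
    import pytensor.tensor as tt

    with pm.Model():
        total_llk = tt.zeros((1, 1))
        like = pm.Potential("dummy", total_llk.sum())
        llk = pm.Deterministic("like", like)  # noqa: F841

    rv = pm.Uniform.dist(lower=0.0, upper=2.0)   # unregistered RV, no model context
    print(pm.draw(rv))                           # pymc3 era: rv.random()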
diff --git a/beat/models/seismic.py b/beat/models/seismic.py
index 502932be..763a43e1 100644
--- a/beat/models/seismic.py
+++ b/beat/models/seismic.py
@@ -5,26 +5,25 @@
from time import time
import numpy as num
-import theano.tensor as tt
-from pymc3 import Deterministic, Uniform
+import pytensor.tensor as tt
+from pymc import Deterministic
from pyrocko.gf import LocalEngine
from pyrocko.trace import Trace
-from theano import config as tconfig
-from theano import shared
-from theano.printing import Print
-from theano.tensor import fft
+from pytensor import config as tconfig
+from pytensor import shared
from beat import config as bconfig
from beat import covariance as cov
-from beat import heart, theanof, utility
+from beat import heart, pytensorf, utility
from beat.ffi import get_gf_prefix, load_gf_library
from beat.models.base import (
Composite,
ConfigInconsistentError,
FaultGeometryNotFoundError,
get_hypervalue_from_point,
+ init_uniform_random,
)
-from beat.models.distributions import get_hyper_name, multivariate_normal_chol
+from beat.models.distributions import multivariate_normal_chol
logger = getLogger("seismic")
@@ -54,7 +53,6 @@ class SeismicComposite(Composite):
_hierarchicalnames = None
def __init__(self, sc, events, project_dir, hypers=False):
-
super(SeismicComposite, self).__init__(events)
logger.debug("Setting up seismic structure ...\n")
@@ -76,17 +74,20 @@ def __init__(self, sc, events, project_dir, hypers=False):
project_dir, bconfig.multi_event_seismic_data_name(i)
)
- logger.info(
- "Loading seismic data for event %i"
- " from: %s " % (i, seismic_data_path)
- )
- self.datahandlers.append(
- heart.init_datahandler(
- seismic_config=sc,
- seismic_data_path=seismic_data_path,
- responses_path=responses_path,
+ if os.path.exists(seismic_data_path):
+ logger.info(
+ "Loading seismic data for event %i"
+ " from: %s " % (i, seismic_data_path)
)
- )
+ self.datahandlers.append(
+ heart.init_datahandler(
+ seismic_config=sc,
+ seismic_data_path=seismic_data_path,
+ responses_path=responses_path,
+ )
+ )
+ else:
+ logger.info("Did not find seismic data for event %i." % i)
self.noise_analyser = cov.SeismicNoiseAnalyser(
structure=sc.noise_estimator.structure,
@@ -100,13 +101,19 @@ def __init__(self, sc, events, project_dir, hypers=False):
for i, wc in enumerate(sc.waveforms):
logger.info('Initialising seismic wavemap for "%s" ...' % wc.name)
if wc.include:
- wmap = heart.init_wavemap(
- waveformfit_config=wc,
- datahandler=self.datahandlers[wc.event_idx],
- event=self.events[wc.event_idx],
- mapnumber=i,
- )
-
+ try:
+ wmap = heart.init_wavemap(
+ waveformfit_config=wc,
+ datahandler=self.datahandlers[wc.event_idx],
+ event=self.events[wc.event_idx],
+ mapnumber=i,
+ )
+ except IndexError:
+ raise IndexError(
+ "Did not find seismic data for event %i ! "
+ "Data for sub-events follows naming: "
+ "seismic_data_subevent_1.pkl, seismic_data_subevent_2.pkl, etc."
+ )
self.wavemaps.append(wmap)
else:
logger.info(
@@ -122,7 +129,6 @@ def __init__(self, sc, events, project_dir, hypers=False):
)
def _hyper2wavemap(self, hypername):
-
dummy = "_".join(hypername.split("_")[1:-1])
for wmap in self.wavemaps:
if wmap._mapid == dummy:
@@ -267,20 +273,14 @@ def init_hierarchicals(self, problem_config):
shape=param.dimension,
lower=param.lower,
upper=param.upper,
- testval=param.testvalue,
+ initval=param.testvalue,
transform=None,
dtype=tconfig.floatX,
)
- try:
- self.hierarchicals[hierarchical_name] = Uniform(
- **kwargs
- )
- except TypeError:
- kwargs.pop("name")
- self.hierarchicals[hierarchical_name] = Uniform.dist(
- **kwargs
- )
+ self.hierarchicals[hierarchical_name] = init_uniform_random(
+ kwargs
+ )
self._hierarchicalnames.append(hierarchical_name)
else:
@@ -330,7 +330,6 @@ def save_covs(wmap, cov_mat="pred_v"):
for traces, attribute in heart.results_for_export(
results=results, datatype="seismic"
):
-
filename = "%s_%i.mseed" % (attribute, stage_number)
outpath = os.path.join(results_path, filename)
try:
@@ -453,7 +452,7 @@ def assemble_results(
Parameters
----------
- point : :func:`pymc3.Point`
+ point : :func:`pymc.Point`
Dictionary with model parameters
force : bool
force preparation of data with input params otherwise cached is
@@ -479,7 +478,6 @@ def assemble_results(
wmap_results = []
for j, obs_tr in enumerate(obs_proc_traces[i]):
-
taper = at.get_pyrocko_taper(float(obs_tr.tmin - at.a))
if outmode != "tapered_data":
@@ -555,7 +553,6 @@ def get_standardized_residuals(
stdz_residuals = OrderedDict()
for dataset, result, target in zip(self.datasets, results, self.targets):
-
hp = get_hypervalue_from_point(
point, dataset, counter, hp_specific=hp_specific
)
@@ -650,6 +647,8 @@ class SeismicGeometryComposite(SeismicComposite):
directory of the model project, where to find the data
sources : list
of :class:`pyrocko.gf.seismosizer.Source`
+ mapping : list
+ of dict of varnames and their sizes
events : list
of :class:`pyrocko.model.Event`
contains information of reference event(s), coordinates of reference
@@ -658,8 +657,7 @@ class SeismicGeometryComposite(SeismicComposite):
if true initialise object for hyper parameter optimization
"""
- def __init__(self, sc, project_dir, sources, events, hypers=False):
-
+ def __init__(self, sc, project_dir, sources, mapping, events, hypers=False):
super(SeismicGeometryComposite, self).__init__(
sc, events, project_dir, hypers=hypers
)
@@ -669,11 +667,16 @@ def __init__(self, sc, project_dir, sources, events, hypers=False):
self.choppers = {}
self.sources = sources
+ self.mapping = mapping
self.correction_name = "time_shift"
self.config = sc
+ @property
+ def n_sources_total(self):
+ return len(self.sources)
+
def point2sources(self, point):
"""
Updates the composite source(s) (in place) with the point values.
@@ -687,29 +690,24 @@ def point2sources(self, point):
tpoint.update(self.fixed_rvs)
tpoint = utility.adjust_point_units(tpoint)
- # remove hyperparameters from point
- hps = self.config.get_hypernames()
-
- for hyper in hps:
- if hyper in tpoint:
- tpoint.pop(hyper)
-
- source = self.sources[0]
- source_params = list(source.keys()) + list(source.stf.keys())
-
- for param in list(tpoint.keys()):
- if param not in source_params:
- tpoint.pop(param)
-
# update source times
if "time" in tpoint:
if self.nevents == 1:
tpoint["time"] += self.event.time # single event
else:
- for i, event in enumerate(self.events): # multi event
- tpoint["time"][i] += event.time
-
- source_points = utility.split_point(tpoint)
+ # careful! if setup with multi-source geodetic and geodetic + seismic
+ # the joint source needs to be first, TODO clean-up
+ times = tpoint["time"]
+ for i in range(times.size): # multi event
+ times[i] += self.events[i].time
+
+ tpoint["time"] = times
+
+ source_points = utility.split_point(
+ tpoint,
+ mapping=self.mapping,
+ weed_params=True,
+ )
for i, source in enumerate(self.sources):
utility.update_source(source, **source_points[i])
@@ -743,16 +741,16 @@ def get_formula(self, input_rvs, fixed_rvs, hyperparams, problem_config):
Parameters
----------
input_rvs : list
- of :class:`pymc3.distribution.Distribution` of source parameters
+ of :class:`pymc.distribution.Distribution` of source parameters
fixed_rvs : dict
of :class:`numpy.array`
hyperparams : dict
- of :class:`pymc3.distribution.Distribution`
+ of :class:`pymc.distribution.Distribution`
problem_config : :class:`config.ProblemConfig`
Returns
-------
- posterior_llk : :class:`theano.tensor.Tensor`
+ posterior_llk : :class:`pytensor.tensor.Tensor`
"""
chop_bounds = ["b", "c"] # we want llk calculation only between b c
@@ -771,7 +769,6 @@ def get_formula(self, input_rvs, fixed_rvs, hyperparams, problem_config):
t2 = time()
wlogpts = []
- self.init_hierarchicals(problem_config)
self.analyse_noise(tpoint, chop_bounds=chop_bounds)
self.init_weights()
if self.config.station_corrections:
@@ -809,7 +806,7 @@ def get_formula(self, input_rvs, fixed_rvs, hyperparams, problem_config):
)
sources = [self.sources[wc.event_idx]]
- self.synthesizers[wmap._mapid] = theanof.SeisSynthesizer(
+ self.synthesizers[wmap._mapid] = pytensorf.SeisSynthesizer(
engine=self.engine,
sources=sources,
targets=wmap.targets,
@@ -821,6 +818,7 @@ def get_formula(self, input_rvs, fixed_rvs, hyperparams, problem_config):
pre_stack_cut=self.config.pre_stack_cut,
station_corrections=self.config.station_corrections,
domain=wc.domain,
+ mapping=self.mapping,
)
synths, _ = self.synthesizers[wmap._mapid](self.input_rvs)
@@ -848,7 +846,7 @@ def get_synthetics(self, point, **kwargs):
Parameters
----------
- point : :func:`pymc3.Point`
+ point : :func:`pymc.Point`
Dictionary with model parameters
kwargs especially to change output of seismic forward model
outmode = 'traces'/ 'array' / 'data'
@@ -868,7 +866,7 @@ def get_synthetics(self, point, **kwargs):
sc = self.config
synths = []
obs = []
- for wmap in self.wavemaps:
+ for i, wmap in enumerate(self.wavemaps):
wc = wmap.config
if not wmap.is_prepared or force:
wmap.prepare_data(
@@ -916,7 +914,7 @@ def get_synthetics(self, point, **kwargs):
chop_bounds=chop_bounds,
nprocs=nprocs,
# plot=True,
- **kwargs
+ **kwargs,
)
if self.config.station_corrections and wc.domain == "time":
@@ -931,7 +929,6 @@ def get_synthetics(self, point, **kwargs):
tr.tmax = dtr.tmax
if wc.domain == "spectrum":
-
valid_spectrum_indices = wmap.get_valid_spectrum_indices(
chop_bounds=chop_bounds, pad_to_pow2=True
)
@@ -1001,7 +998,6 @@ def update_weights(self, point, n_jobs=1, plot=False, chop_bounds=["b", "c"]):
for channel in wmap.channels:
tidxs = wmap.get_target_idxs([channel])
for station, tidx in zip(wmap.stations, tidxs):
-
logger.debug(
"Channel %s of Station %s " % (channel, station.station)
)
@@ -1063,7 +1059,6 @@ class SeismicDistributerComposite(SeismicComposite):
"""
def __init__(self, sc, project_dir, events, hypers=False):
-
super(SeismicDistributerComposite, self).__init__(
sc, events, project_dir, hypers=hypers
)
@@ -1106,7 +1101,7 @@ def __init__(self, sc, project_dir, events, hypers=False):
)
self.sweepers.append(
- theanof.Sweeper(
+ pytensorf.Sweeper(
dgc.patch_lengths[idx],
n_p_dip,
n_p_strike,
@@ -1161,7 +1156,7 @@ def load_gfs(self, crust_inds=None, make_shared=True):
crust_inds : list
of int to indexes of Green's Functions
make_shared : bool
- if True transforms gfs to :class:`theano.shared` variables
+ if True transforms gfs to :class:`pytensor.shared` variables
"""
if not isinstance(crust_inds, list):
raise TypeError("crust_inds need to be a list!")
@@ -1212,7 +1207,6 @@ def load_gfs(self, crust_inds=None, make_shared=True):
self.gfs[key] = gfs
def get_formula(self, input_rvs, fixed_rvs, hyperparams, problem_config):
-
# no a, d taper bounds as GF library saved between b c
chop_bounds = ["b", "c"]
@@ -1239,7 +1233,6 @@ def get_formula(self, input_rvs, fixed_rvs, hyperparams, problem_config):
gfs.init_optimization()
self.init_weights()
- self.init_hierarchicals(problem_config)
if self.config.station_corrections:
logger.info(
"Initialized %i hierarchical parameters for "
@@ -1262,7 +1255,7 @@ def get_formula(self, input_rvs, fixed_rvs, hyperparams, problem_config):
index=index,
positions_dip=nuc_dip[index],
positions_strike=nuc_strike[index],
- backend="theano",
+ backend="pytensor",
)
sf_patch_indexs = self.fault.cum_subfault_npatches[index : index + 2]
@@ -1360,7 +1353,7 @@ def get_synthetics(self, point, **kwargs):
Parameters
----------
- point : :func:`pymc3.Point`
+ point : :func:`pymc.Point`
Dictionary with model parameters
kwargs especially to change output of the forward model
outmode: stacked_traces/ tapered_data/ array
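The hierarchicals hunk in seismic.py swaps pymc3's testval keyword for pymc v5's initval and routes the kwargs through init_uniform_random (defined in beat.models.base, not shown in this section). A hedged sketch of the renamed keyword with toy station-correction bounds:

    import numpy as num
    import pymc as pm

    kwargs = dict(
        name="time_shift",
        shape=3,
        lower=num.full(3, -5.0),
        upper=num.full(3, 5.0),
        initval=num.zeros(3),   # pymc3 called this testval
        transform=None,
    )
    with pm.Model():
        rv = pm.Uniform(**kwargs)  # noqa: F841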
diff --git a/beat/parallel.py b/beat/parallel.py
index 7e42834a..d8001747 100644
--- a/beat/parallel.py
+++ b/beat/parallel.py
@@ -1,33 +1,36 @@
import multiprocessing
import signal
-import sys
import traceback
from collections import OrderedDict
from functools import wraps
from io import BytesIO
from itertools import count
from logging import getLogger
-from multiprocessing import reduction
+import cloudpickle
import numpy as num
-logger = getLogger("parallel")
+mp_context = multiprocessing
+# mp_context = multiprocessing.get_context("spawn")
+# monkey patch pickling in multiprocessing
-# for sharing memory across processes
-_shared_memory = OrderedDict()
-_tobememshared = set([])
+if False:
+ @classmethod
+ def dumps(cls, obj, protocol=None):
+ buf = BytesIO()
+ cls(buf, protocol).dump(obj)
+ return buf.getbuffer()
-@classmethod
-def dumps(cls, obj, protocol=None):
- buf = BytesIO()
- cls(buf, 4).dump(obj)
- return buf.getbuffer()
+ mp_context.reduction.ForkingPickler = cloudpickle.CloudPickler
+ mp_context.reduction.ForkingPickler.dumps = cloudpickle.dumps
+ mp_context.reduction.ForkingPickler.loads = cloudpickle.loads
+logger = getLogger("parallel")
-# monkey patch pickling in multiprocessing
-if sys.hexversion < 0x30600F0:
- reduction.ForkingPickler.dumps = dumps
+# for sharing memory across processes
+_shared_memory = OrderedDict()
+_tobememshared = set([])
def get_process_id():
@@ -35,7 +38,7 @@ def get_process_id():
Returns the process id of the current process
"""
try:
- current = multiprocessing.current_process()
+ current = mp_context.current_process()
n = current._identity[0]
except IndexError:
# in case of only one used core ...
@@ -50,7 +53,7 @@ def check_available_memory(filesize):
Parameters
----------
filesize : float
- in [Mb] megabyte
+ in [Mb] megabyte
"""
from psutil import virtual_memory
@@ -219,13 +222,13 @@ def paripool(
"""
def start_message(*globals):
- logger.debug("Starting %s" % multiprocessing.current_process().name)
+ logger.debug("Starting %s" % mp_context.current_process().name)
def callback(result):
logger.info("\n Feierabend! Done with the work!")
if nprocs is None:
- nprocs = multiprocessing.cpu_count()
+ nprocs = mp_context.cpu_count()
if chunksize is None:
chunksize = 1
@@ -237,7 +240,7 @@ def callback(result):
yield [function(*work)]
else:
- pool = multiprocessing.Pool(
+ pool = mp_context.Pool(
processes=nprocs, initializer=initializer, initargs=initargs
)
@@ -265,7 +268,7 @@ def callback(result):
yield pool.map_async(
_pay_worker, workers, chunksize=chunksize, callback=callback
).get(pool_timeout)
- except multiprocessing.TimeoutError:
+ except mp_context.TimeoutError:
logger.error("Overseer fell asleep. Fire everyone!")
pool.terminate()
except KeyboardInterrupt:
@@ -276,7 +279,7 @@ def callback(result):
pool.close()
pool.join()
# reset process counter for tqdm progressbar
- multiprocessing.process._process_counter = count(1)
+ mp_context.process._process_counter = count(1)
def memshare(parameternames):
@@ -287,7 +290,7 @@ def memshare(parameternames):
Parameters
----------
parameternames : list of str
- off names to :class:`theano.tensor.sharedvar.TensorSharedVariable`
+ of names of :class:`pytensor.tensor.sharedvar.TensorSharedVariable`
"""
for paramname in parameternames:
if not isinstance(paramname, str):
@@ -301,36 +304,36 @@ def memshare(parameternames):
def memshare_sparams(shared_params):
"""
- For each parameter in a list of Theano TensorSharedVariable
+ For each parameter in a list of pytensor TensorSharedVariable
we substitute the memory with a sharedctype using the
multiprocessing library.
The wrapped memory can then be used by other child processes
thereby synchronising different instances of a model across
processes (e.g. for multi cpu gradient descent using single cpu
- Theano code).
+ pytensor code).
Parameters
----------
shared_params : list
- of :class:`theano.tensor.sharedvar.TensorSharedVariable`
+ of :class:`pytensor.tensor.sharedvar.TensorSharedVariable`
Returns
-------
memshared_instances : list
of :class:`multiprocessing.sharedctypes.RawArray`
list of sharedctypes (shared memory arrays) that point
- to the memory used by the current process's Theano variable.
+ to the memory used by the current process's pytensor variable.
Notes
-----
Modified from:
- https://github.com/JonathanRaiman/theano_lstm/blob/master/theano_lstm/shared_memory.py
+ https://github.com/JonathanRaiman/pytensor_lstm/blob/master/pytensor_lstm/shared_memory.py
- # define some theano function:
+ # define some pytensor function:
myfunction = myfunction(20, 50, etc...)
- # wrap the memory of the Theano variables:
+ # wrap the memory of the pytensor variables:
memshared_instances = make_params_shared(myfunction.get_shared())
Then you can use this memory in child processes
@@ -343,7 +346,7 @@ def memshare_sparams(shared_params):
shape = original.shape
original.shape = size
logger.info("Allocating %s" % param.name)
- ctypes = multiprocessing.RawArray(
+ ctypes = mp_context.RawArray(
"f" if original.dtype == num.float32 else "d", size
)
@@ -358,12 +361,12 @@ def memshare_sparams(shared_params):
def borrow_memory(shared_param, memshared_instance, shape):
"""
Spawn different processes with the shared memory
- of your theano model's variables.
+ of your pytensor model's variables.
Parameters
----------
- shared_param : :class:`theano.tensor.sharedvar.TensorSharedVariable`
- the Theano shared variable where
+ shared_param : :class:`pytensor.tensor.sharedvar.TensorSharedVariable`
+ the pytensor shared variable where
shared memory should be used instead.
memshared_instance : :class:`multiprocessing.RawArray`
the memory shared across processes (e.g.from `memshare_sparams`)
@@ -373,20 +376,20 @@ def borrow_memory(shared_param, memshared_instance, shape):
Notes
-----
Modified from:
- https://github.com/JonathanRaiman/theano_lstm/blob/master/theano_lstm/shared_memory.py
+ https://github.com/JonathanRaiman/pytensor_lstm/blob/master/pytensor_lstm/shared_memory.py
- For each process in the target function run the theano_borrow_memory
+ For each process in the target function run the pytensor_borrow_memory
method on the parameters you want to have share memory across processes.
In this example we have a model called "mymodel" with parameters stored in
- a list called "params". We loop through each theano shared variable and
+ a list called "params". We loop through each pytensor shared variable and
call `borrow_memory` on it to share memory across processes.
Examples
--------
>>> def spawn_model(path, wrapped_params):
# prevent recompilation and arbitrary locks
- >>> theano.config.reoptimize_unpickled_function = False
- >>> theano.gof.compilelock.set_lock_status(False)
+ >>> pytensor.config.reoptimize_unpickled_function = False
+ >>> pytensor.gof.compilelock.set_lock_status(False)
# load your function from its pickled instance (from path)
>>> myfunction = MyFunction.load(path)
# for each parameter in your function
@@ -414,14 +417,14 @@ def borrow_memory(shared_param, memshared_instance, shape):
def borrow_all_memories(shared_params, memshared_instances):
"""
- Run theano_borrow_memory on a list of params and shared memory
+ Run pytensor_borrow_memory on a list of params and shared memory
sharedctypes.
Parameters
----------
shared_params : list
- of :class:`theano.tensor.sharedvar.TensorSharedVariable`
- the Theano shared variable where
+ of :class:`pytensor.tensor.sharedvar.TensorSharedVariable`
+ the pytensor shared variable where
shared memory should be used instead.
memshared_instances : dict of tuples
of :class:`multiprocessing.RawArray` and their shapes
@@ -430,7 +433,7 @@ def borrow_all_memories(shared_params, memshared_instances):
Notes
-----
Same as `borrow_memory` but for lists of shared memories and
- theano variables. See `borrow_memory`
+ pytensor variables. See `borrow_memory`
"""
for sparam in shared_params:
borrow_memory(sparam, *memshared_instances[sparam.name])
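memshare_sparams now targets pytensor shared variables, but the mechanism is unchanged: the parameter's buffer is replaced by a multiprocessing RawArray so that child processes see the same memory. A toy, self-contained sketch of that wrapping (single variable, float64 only):

    import multiprocessing
    import numpy as num
    from pytensor import shared

    param = shared(num.zeros(4, dtype="float64"), name="weights")
    original = param.get_value(borrow=True)

    ctypes_arr = multiprocessing.RawArray("d", original.size)   # shared buffer
    wrapped = num.frombuffer(ctypes_arr, dtype="float64").reshape(original.shape)
    wrapped[...] = original                  # keep the current values
    param.set_value(wrapped, borrow=True)    # the variable now lives in shared memory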
diff --git a/beat/plotting/__init__.py b/beat/plotting/__init__.py
index 3e22c05b..881c43c4 100644
--- a/beat/plotting/__init__.py
+++ b/beat/plotting/__init__.py
@@ -5,22 +5,22 @@
from .seismic import * # noqa
plots_catalog = {
- "correlation_hist": draw_correlation_hist,
- "stage_posteriors": draw_posteriors,
- "waveform_fits": draw_seismic_fits,
- "scene_fits": draw_scene_fits,
- "gnss_fits": draw_gnss_fits,
- "geodetic_covariances": draw_geodetic_covariances,
- "velocity_models": draw_earthmodels,
- "slip_distribution": draw_slip_dist,
- "slip_distribution_3d": draw_3d_slip_distribution,
- "hudson": draw_hudson,
- "lune": draw_lune_plot,
- "fuzzy_beachball": draw_fuzzy_beachball,
- "fuzzy_mt_decomp": draw_fuzzy_mt_decomposition,
- "moment_rate": draw_moment_rate,
- "station_map": draw_station_map_gmt,
- "station_variance_reductions": draw_station_variance_reductions,
+ "correlation_hist": draw_correlation_hist, # noqa: F405
+ "stage_posteriors": draw_posteriors, # noqa: F405
+ "waveform_fits": draw_seismic_fits, # noqa: F405
+ "scene_fits": draw_scene_fits, # noqa: F405
+ "gnss_fits": draw_gnss_fits, # noqa: F405
+ "geodetic_covariances": draw_geodetic_covariances, # noqa: F405
+ "velocity_models": draw_earthmodels, # noqa: F405
+ "slip_distribution": draw_slip_dist, # noqa: F405
+ "slip_distribution_3d": draw_3d_slip_distribution, # noqa: F405
+ "hudson": draw_hudson, # noqa: F405
+ "lune": draw_lune_plot, # noqa: F405
+ "fuzzy_beachball": draw_fuzzy_beachball, # noqa: F405
+ "fuzzy_mt_decomp": draw_fuzzy_mt_decomposition, # noqa: F405
+ "moment_rate": draw_moment_rate, # noqa: F405
+ "station_map": draw_station_map_gmt, # noqa: F405
+ "station_variance_reductions": draw_station_variance_reductions, # noqa: F405
}
@@ -33,23 +33,21 @@
"fuzzy_mt_decomp",
"hudson",
"lune",
- "fuzzy_beachball",
"station_variance_reductions",
]
geodetic_plots = ["scene_fits", "gnss_fits", "geodetic_covariances"]
-polarity_plots = ["fuzzy_beachball", "fuzzy_mt_decomp", "lune", "hudson", "station_map"]
-
-geometry_plots = ["correlation_hist", "velocity_models"]
-
-
-ffi_plots = ["moment_rate", "slip_distribution"]
+polarity_plots = ["fuzzy_mt_decomp", "lune", "hudson", "station_map"]
+geometry_plots = ["correlation_hist", "velocity_models", "fuzzy_beachball"]
+bem_plots = ["correlation_hist", "slip_distribution_3d", "fuzzy_beachball"]
+ffi_plots = ["moment_rate", "slip_distribution", "slip_distribution_3d"]
plots_mode_catalog = {
"geometry": common_plots + geometry_plots,
"ffi": common_plots + ffi_plots,
+ "bem": common_plots + bem_plots,
}
plots_datatype_catalog = {
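With the catalog entries above, the new "bem" mode resolves to the common plots plus correlation_hist, slip_distribution_3d and fuzzy_beachball. A quick lookup, assuming the beat package is installed:

    from beat.plotting import plots_mode_catalog

    print(plots_mode_catalog["bem"])
    # expected to include "slip_distribution_3d" and "fuzzy_beachball"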
diff --git a/beat/plotting/bem.py b/beat/plotting/bem.py
new file mode 100644
index 00000000..8c70a1cf
--- /dev/null
+++ b/beat/plotting/bem.py
@@ -0,0 +1,166 @@
+from matplotlib import pyplot as plt
+from pyrocko.plot import mpl_papersize
+from mpl_toolkits.mplot3d.art3d import Poly3DCollection
+from matplotlib.ticker import MaxNLocator
+
+from beat.bem import slip_comp_to_idx
+import numpy as num
+from .common import set_locator_axes, scale_axes, set_axes_equal_3d
+
+km = 1000.0
+
+
+def cb_round(value, decimal=3):
+ return num.round(value, decimal)
+
+
+def slip_distribution_3d(
+ discretized_sources, slip_vectors, perspective="150/30", debug=False
+):
+ # fontsize_title = 12
+ fontsize = 8
+
+ camera = [float(angle) for angle in perspective.split("/")]
+
+ fig = plt.figure(figsize=mpl_papersize("a5", "landscape"))
+ slip_comps = ["strike", "dip", "normal"]
+
+ axs = []
+ sources_coord_limits = (
+ num.dstack(
+ [dsource.get_minmax_triangles_xyz() for dsource in discretized_sources]
+ )
+ / km
+ )
+ min_limits = num.floor(sources_coord_limits.min(2).min(1)) * km
+ max_limits = num.ceil(sources_coord_limits.max(2).max(1)) * km
+ for j, comp in enumerate(slip_comps):
+ cmap = plt.get_cmap("hot") if comp == "normal" else plt.get_cmap("seismic")
+ ax = fig.add_subplot(
+ 1, len(slip_comps), j + 1, projection="3d", computed_zorder=False
+ )
+ for k, (dsource, slips3d) in enumerate(zip(discretized_sources, slip_vectors)):
+ pa_col = Poly3DCollection(
+ dsource.triangles_xyz,
+ )
+
+ a = slips3d[:, slip_comp_to_idx[comp]]
+
+ if comp in ["strike", "dip"]:
+ absmax = num.max([num.abs(a.min()), a.max()])
+ cbounds = [-cb_round(absmax), cb_round(absmax)]
+ else:
+ cbounds = [cb_round(a.min()), cb_round(a.max())]
+
+ assert a.size == dsource.n_triangles
+
+ ax.add_collection(pa_col)
+
+ if num.diff(cbounds) == 0:
+ colors = ["white" for _ in range(a.size)]
+ pa_col.set_facecolor(colors)
+ pa_col.set(edgecolor="k", linewidth=0.1, alpha=0.25)
+ else:
+ cbl = 0.1 + j * 0.3
+ cbb = 0.2 - k * 0.08
+ cbw = 0.15
+ cbh = 0.01
+
+ cbaxes = fig.add_axes([cbl, cbb, cbw, cbh])
+
+ pa_col.set_cmap(cmap)
+
+ pa_col.set_array(a)
+ pa_col.set_clim(*cbounds)
+ pa_col.set(edgecolor="k", linewidth=0.2, alpha=0.75)
+
+ cbs = plt.colorbar(
+ pa_col,
+ ax=ax,
+ ticks=cbounds,
+ cax=cbaxes,
+ orientation="horizontal",
+ )
+ cbs.set_label(f"{comp}-slip [m]", fontsize=fontsize)
+ cbs.ax.tick_params(labelsize=fontsize)
+
+ unit_vectors = getattr(dsource, f"unit_{comp}_vectors")
+
+ ax.quiver(
+ dsource.centroids[::3, 0],
+ dsource.centroids[::3, 1],
+ dsource.centroids[::3, 2],
+ unit_vectors[::3, 0],
+ unit_vectors[::3, 1],
+ unit_vectors[::3, 2],
+ color="k",
+ length=dsource.mesh_size,
+ linewidth=1.0,
+ )
+
+ if False:
+ # plot vector normals for debugging
+ unit_vectors = getattr(dsource, "unit_normal_vectors")
+
+ ax.quiver(
+ dsource.centroids[::, 0],
+ dsource.centroids[::, 1],
+ dsource.centroids[::, 2],
+ unit_vectors[::, 0],
+ unit_vectors[::, 1],
+ unit_vectors[::, 2],
+ color="k",
+ length=dsource.mesh_size,
+ normalize=True,
+ linewidth=1.0,
+ )
+ if debug:
+ for tri_idx in range(dsource.n_triangles):
+ ax.text(
+ dsource.centroids[tri_idx, 0],
+ dsource.centroids[tri_idx, 1],
+ dsource.centroids[tri_idx, 2],
+ tri_idx,
+ fontsize=6,
+ )
+
+ ax.tick_params(labelsize=fontsize, rotation=-30)
+ if j == 2:
+ ax.set_xlabel("East-Distance [km]", fontsize=fontsize)
+ ax.set_ylabel("North-Distance [km]", fontsize=fontsize)
+ ax.set_zlabel("Depth [km]", fontsize=fontsize)
+ else:
+ ax.set_xticklabels([])
+ ax.set_yticklabels([])
+ ax.set_zticklabels([])
+
+ ax.set_xlim([min_limits[0], max_limits[0]])
+ ax.set_ylim([min_limits[1], max_limits[1]])
+ ax.set_zlim([min_limits[2], max_limits[2]])
+
+ set_axes_equal_3d(ax, axes="xyz")
+
+ set_locator_axes(ax.get_xaxis(), MaxNLocator(nbins=3))
+ set_locator_axes(ax.get_yaxis(), MaxNLocator(nbins=3))
+ set_locator_axes(ax.get_zaxis(), MaxNLocator(nbins=3))
+
+ scale = {"scale": 1 / km}
+
+ scale_axes(ax.get_xaxis(), **scale)
+ scale_axes(ax.get_yaxis(), **scale)
+ scale_axes(ax.get_zaxis(), **scale)
+
+ # ax.invert_zaxis()
+ # ax.set_aspect(1)
+ ax.view_init(*camera[::-1])
+
+ fig.subplots_adjust(
+ left=0.03,
+ right=1.0 - 0.08,
+ bottom=0.06,
+ top=1.0 - 0.06,
+ wspace=0.0,
+ hspace=0.1,
+ )
+ # fig.tight_layout()
+ return fig, axs
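slip_distribution_3d colours one Poly3DCollection per discretized source by the selected slip component. The core matplotlib pattern it relies on is sketched below with synthetic triangles; this is illustrative only, the real call takes beat.bem discretized sources and slip vectors:

    import numpy as num
    from matplotlib import pyplot as plt
    from mpl_toolkits.mplot3d.art3d import Poly3DCollection

    triangles = num.array(
        [[[0, 0, 0], [1, 0, 0], [0, 1, 0]], [[1, 0, 0], [1, 1, 0], [0, 1, 0]]],
        dtype=float,
    )
    values = num.array([0.2, 0.8])   # one scalar per triangle face

    fig = plt.figure()
    ax = fig.add_subplot(111, projection="3d", computed_zorder=False)
    pa_col = Poly3DCollection(triangles)
    pa_col.set_cmap(plt.get_cmap("seismic"))
    pa_col.set_array(values)                  # face colours from the data
    pa_col.set_clim(values.min(), values.max())
    ax.add_collection(pa_col)
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    ax.set_zlim(-1, 1)
    plt.colorbar(pa_col, ax=ax, orientation="horizontal")
    plt.show()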
diff --git a/beat/plotting/common.py b/beat/plotting/common.py
index ed033b70..a1d9458e 100644
--- a/beat/plotting/common.py
+++ b/beat/plotting/common.py
@@ -3,14 +3,13 @@
import numpy as num
from matplotlib import pyplot as plt
-from matplotlib.ticker import MaxNLocator
+from matplotlib.backends.backend_pdf import PdfPages
+from matplotlib.ticker import FixedLocator, MaxNLocator
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
-from pymc3 import quantiles
-from pyrocko import orthodrome as otd
-from pyrocko.guts import Bool, Dict, Int, List, Object, String, StringChoice, load
+from pyrocko.guts import Bool, Dict, Int, List, Object, String, StringChoice
from pyrocko.plot import mpl_graph_color, mpl_papersize
+from pytensor import config as tconfig
from scipy.stats import kde
-from theano import config as tconfig
from beat import utility
@@ -18,88 +17,6 @@
km = 1000.0
-u_n = "$[N]$"
-u_nm = "$[Nm]$"
-u_km = "$[km]$"
-u_km_s = "$[km/s]$"
-u_deg = "$[^{\circ}]$"
-u_deg_myr = "$[^{\circ} / myr]$"
-u_m = "$[m]$"
-u_v = "$[m^3]$"
-u_s = "$[s]$"
-u_rad = "$[rad]$"
-u_hyp = ""
-u_percent = "[$\%$]"
-u_nanostrain = "nstrain"
-
-plot_units = {
- "east_shift": u_km,
- "north_shift": u_km,
- "depth": u_km,
- "width": u_km,
- "length": u_km,
- "dip": u_deg,
- "dip1": u_deg,
- "dip2": u_deg,
- "strike": u_deg,
- "strike1": u_deg,
- "strike2": u_deg,
- "rake": u_deg,
- "rake1": u_deg,
- "rake2": u_deg,
- "mix": u_hyp,
- "volume_change": u_v,
- "diameter": u_km,
- "slip": u_m,
- "opening_fraction": u_hyp,
- "azimuth": u_deg,
- "bl_azimuth": u_deg,
- "amplitude": u_nm,
- "bl_amplitude": u_m,
- "locking_depth": u_km,
- "nucleation_dip": u_km,
- "nucleation_strike": u_km,
- "nucleation_x": u_hyp,
- "nucleation_y": u_hyp,
- "time_shift": u_s,
- "coupling": u_percent,
- "uperp": u_m,
- "uparr": u_m,
- "utens": u_m,
- "durations": u_s,
- "velocities": u_km_s,
- "fn": u_n,
- "fe": u_n,
- "fd": u_n,
- "mnn": u_nm,
- "mee": u_nm,
- "mdd": u_nm,
- "mne": u_nm,
- "mnd": u_nm,
- "med": u_nm,
- "magnitude": u_hyp,
- "eps_xx": u_nanostrain,
- "eps_yy": u_nanostrain,
- "eps_xy": u_nanostrain,
- "rotation": u_nanostrain,
- "pole_lat": u_deg,
- "pole_lon": u_deg,
- "omega": u_deg_myr,
- "w": u_rad,
- "v": u_rad,
- "kappa": u_deg,
- "sigma": u_deg,
- "h": u_deg,
- "distance": u_km,
- "delta_depth": u_km,
- "delta_time": u_s,
- "time": u_s,
- "duration": u_s,
- "peak_ratio": u_hyp,
- "h_": u_hyp,
- "like": u_hyp,
-}
-
def arccosdeg(x):
return num.rad2deg(num.arccos(x))
@@ -113,11 +30,13 @@ def arccosdeg(x):
def get_transform(varname):
+ def do_nothing(x):
+ return x
try:
new_varname, transform = transforms[varname]
except KeyError:
- transform = lambda x: x
+ transform = do_nothing
new_varname = varname
return new_varname, transform
@@ -138,7 +57,6 @@ def cbtick(x):
def plot_cov(target, point_size=20):
-
ax = plt.axes()
im = ax.scatter(
target.lons,
@@ -172,7 +90,6 @@ def plot_log_cov(cov_mat):
def get_gmt_config(gmtpy, fontsize=14, h=20.0, w=20.0):
-
if gmtpy.is_gmt5(version="newest"):
gmtconfig = {
"MAP_GRID_PEN_PRIMARY": "0.1p",
@@ -199,13 +116,6 @@ def get_gmt_config(gmtpy, fontsize=14, h=20.0, w=20.0):
return gmtconfig
-def hypername(varname):
- if varname in list(plot_units.keys()):
- return varname
- else:
- return "h_"
-
-
class PlotOptions(Object):
post_llk = String.T(
default="max",
@@ -293,6 +203,22 @@ def str_duration(t):
return s + "%.1f d" % (t / (24.0 * 3600.0))
+def get_llk_idx_to_trace(mtrace, point_llk="max"):
+ """
+ Return the index of the point in the multitrace with the requested likelihood.
+
+ Parameters
+ ----------
+ mtrace: pm.MultiTrace
+ sampled result trace containing the posterior ensemble
+ point_llk: str
+ return the index of the point with 'max', 'min' or 'mean' likelihood
+ """
+ llk = mtrace.get_values(varname="like", combine=True)
+ posterior_idxs = utility.get_fit_indexes(llk)
+ return posterior_idxs[point_llk]
+
+
def get_result_point(mtrace, point_llk="max"):
"""
Return Point dict from multitrace
@@ -311,12 +237,8 @@ def get_result_point(mtrace, point_llk="max"):
"""
if point_llk != "None":
- llk = mtrace.get_values(varname="like", combine=True)
-
- posterior_idxs = utility.get_fit_indexes(llk)
-
- point = mtrace.point(idx=posterior_idxs[point_llk])
-
+ idx = get_llk_idx_to_trace(mtrace, point_llk=point_llk)
+ point = mtrace.point(idx=idx)
else:
point = None
@@ -324,7 +246,6 @@ def get_result_point(mtrace, point_llk="max"):
def hist_binning(mind, maxd, nbins=40):
-
step = ((maxd - mind) / nbins).astype(tconfig.floatX)
if step == 0:
@@ -352,6 +273,10 @@ def histplot_op(
):
"""
Modified from pymc3. Additional color argument.
+
+ data: array_like
+ samples for the histogram; one group of samples per row (row-wise ordering).
+
"""
cumulative = kwargs.pop("cumulative", False)
@@ -374,16 +299,15 @@ def histplot_op(
else:
histtype = "step"
- for i in range(data.shape[1]):
- d = data[:, i]
- quants = quantiles(d, qlist=qlist)
+ for d in data:
+ quants = num.percentile(d, q=qlist)
- mind = quants[qlist[0]]
- maxd = quants[qlist[-1]]
+ mind = quants[0]
+ maxd = quants[-1]
if reference is not None:
- mind = num.minimum(mind, reference)
- maxd = num.maximum(maxd, reference)
+ mind = num.minimum(mind, reference).min()
+ maxd = num.maximum(maxd, reference).max()
if tstd is None:
tstd = num.std(d)
@@ -433,9 +357,10 @@ def histplot_op(
ax.set_xlim(leftb, rightb)
if cumulative:
# need left plot bound, leftb
- sigma_quants = quantiles(d, [5, 68, 95])
+ quants = [5, 68, 95]
+ sigma_quants = num.percentile(d, q=quants)
- for quantile, value in sigma_quants.items():
+ for quantile, value in zip(quants, sigma_quants):
quantile /= 100.0
if nsources == 1:
x = [leftb, value, value]
@@ -473,7 +398,6 @@ def histplot_op(
def hist2d_plot_op(ax, data_x, data_y, bins=(None, None), cmap=None):
-
if cmap is None:
cmap = plt.get_cmap("afmhot_r")
@@ -492,7 +416,6 @@ def hist2d_plot_op(ax, data_x, data_y, bins=(None, None), cmap=None):
def variance_reductions_hist_plot(axs, variance_reductions, labels):
-
n_vrs = len(variance_reductions)
if n_vrs != len(labels):
@@ -548,7 +471,6 @@ def kde2plot(x, y, grid=200, ax=None, **kwargs):
def spherical_kde_op(
lats0, lons0, lats=None, lons=None, grid_size=(200, 200), sigma=None
):
-
from beat.models.distributions import vonmises_fisher, vonmises_std
if sigma is None:
@@ -627,17 +549,23 @@ def hide_ticks(ax, axis="yaxis"):
tick.tick2line.set_visible(False)
-def scale_axes(axis, scale, offset=0.0):
+def scale_axes(axis, scale, offset=0.0, precision=1):
from matplotlib.ticker import ScalarFormatter
class FormatScaled(ScalarFormatter):
@staticmethod
def __call__(value, pos):
- return "{:,.1f}".format(offset + value * scale).replace(",", " ")
+ return f"{offset + value * scale:.{precision}f}"
axis.set_major_formatter(FormatScaled())
+def set_locator_axes(axis, locator):
+ axis.set_major_locator(locator)
+ ticks_loc = axis.get_majorticklocs().tolist()
+ axis.set_major_locator(FixedLocator(ticks_loc))
+
+
def set_anchor(sources, anchor):
for source in sources:
source.anchor = anchor
@@ -648,15 +576,6 @@ def get_gmt_colorstring_from_mpl(i):
return utility.list2string(color, "/")
-def get_latlon_ratio(lat, lon):
- """
- Get latlon ratio at given location
- """
- dlat_meters = otd.distance_accurate50m(lat, lon, lat - 1.0, lon)
- dlon_meters = otd.distance_accurate50m(lat, lon, lat, lon - 1.0)
- return dlat_meters / dlon_meters
-
-
def plot_inset_hist(
axes,
data,
@@ -670,7 +589,6 @@ def plot_inset_hist(
alpha=0.4,
background_alpha=1.0,
):
-
in_ax = inset_axes(
axes,
width="100%",
@@ -895,7 +813,6 @@ def get_nice_plot_bounds(dmin, dmax, override_mode="min-max"):
def plot_covariances(datasets, covariances):
-
cmap = plt.get_cmap("seismic")
ndata = len(covariances)
@@ -933,7 +850,6 @@ def plot_covariances(datasets, covariances):
cbw = 0.15
for kidx, (cov, dataset) in enumerate(zip(covariances, datasets)):
-
figidx, rowidx = utility.mod_i(kidx, ndmax)
axs = axes[figidx][rowidx, :]
@@ -946,11 +862,10 @@ def plot_covariances(datasets, covariances):
cbb = 0.06
vmin, vmax = cov.get_min_max_components()
- for l, attr in enumerate(["data", "pred_v"]):
+ for i_l, attr in enumerate(["data", "pred_v"]):
cmat = getattr(cov, attr)
- ax = axs[l]
+ ax = axs[i_l]
if cmat is not None and cmat.sum() != 0.0:
-
im = ax.imshow(
cmat,
cmap=cmap,
@@ -963,7 +878,7 @@ def plot_covariances(datasets, covariances):
yticker = MaxNLocator(nbins=2)
ax.xaxis.set_major_locator(xticker)
ax.yaxis.set_major_locator(yticker)
- if l == 0:
+ if i_l == 0:
ax.set_ylabel("Sample idx")
ax.set_xlabel("Sample idx")
ax.set_title(dataset.name)
@@ -988,8 +903,36 @@ def plot_covariances(datasets, covariances):
return figures, axes
-def get_weights_point(composite, best_point, config):
+def set_axes_equal_3d(ax, axes="xyz"):
+ """
+ Make axes of 3D plot have equal scale so that spheres appear as
+ spheres, cubes as cubes, etc.
+ This is one possible solution to Matplotlib's
+ ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
+
+ Parameters
+ ----------
+ ax: a matplotlib axis, e.g., as output from plt.gca().
+ """
+ def set_axes_radius(ax, origin, radius, axes=["xyz"]):
+ if "x" in axes:
+ ax.set_xlim3d([origin[0] - radius, origin[0] + radius])
+
+ if "y" in axes:
+ ax.set_ylim3d([origin[1] - radius, origin[1] + radius])
+
+ if "z" in axes:
+ ax.set_zlim3d([origin[2] - radius, origin[2] + radius])
+
+ limits = num.array([ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()])
+
+ origin = num.mean(limits, axis=1)
+ radius = 0.5 * num.max(num.abs(limits[:, 1] - limits[:, 0]))
+ set_axes_radius(ax, origin, radius, axes=axes)
+
+
+def get_weights_point(composite, best_point, config):
if composite.config.noise_estimator.structure == "non-toeplitz":
# nT run is done with test point covariances!
if config.sampler_config.parameters.update_covariances:
@@ -1002,3 +945,29 @@ def get_weights_point(composite, best_point, config):
tpoint = best_point
return tpoint
+
+
+def plot_exists(outpath, outformat, force):
+ outpath_tmp = f"{outpath}.{outformat}"
+ if os.path.exists(outpath_tmp) and not force and outformat != "display":
+ logger.warning("Plot exists! Use --force to overwrite!")
+ return True
+ else:
+ return False
+
+
+def save_figs(figs, outpath, outformat, dpi):
+ if outformat == "display":
+ plt.show()
+
+ elif outformat == "pdf":
+ filepath = f"{outpath}.pdf"
+ logger.info("saving figures to %s" % filepath)
+ with PdfPages(filepath) as opdf:
+ for fig in figs:
+ opdf.savefig(fig)
+ else:
+ for i, fig in enumerate(figs):
+ filepath = f"{outpath}_{i}.{outformat}"
+ logger.info("saving figure to %s" % filepath)
+ fig.savefig(filepath, dpi=dpi)
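histplot_op drops the removed pymc3.quantiles helper in favour of numpy.percentile; the return type changes from a dict keyed by quantile to an array in qlist order, which is why the indexing above switches to quants[0] and quants[-1]. A small numpy-only demonstration:

    import numpy as num

    d = num.random.default_rng(0).normal(size=1000)
    qlist = [0.01, 99.99]
    quants = num.percentile(d, q=qlist)   # array, ordered like qlist
    mind, maxd = quants[0], quants[-1]
    print(mind, maxd)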
diff --git a/beat/plotting/ffi.py b/beat/plotting/ffi.py
index e3c9e374..3fb6197c 100644
--- a/beat/plotting/ffi.py
+++ b/beat/plotting/ffi.py
@@ -4,9 +4,8 @@
import numpy as num
import pyrocko.moment_tensor as mt
from matplotlib import pyplot as plt
-from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.collections import PatchCollection
-from matplotlib.patches import FancyArrow, Rectangle
+from matplotlib.patches import Rectangle
from matplotlib.ticker import FormatStrFormatter, MaxNLocator
from pyrocko import gmtpy
from pyrocko import orthodrome as otd
@@ -20,8 +19,8 @@
)
from beat import utility
-from beat.config import ffi_mode_str
-from beat.models import Stage, load_stage
+from beat.config import bem_mode_str, ffi_mode_str
+from beat.models import load_stage
from .common import (
draw_line_on_array,
@@ -29,7 +28,10 @@
get_gmt_config,
get_result_point,
km,
+ plot_exists,
+ save_figs,
scale_axes,
+ set_axes_equal_3d,
)
logger = logging.getLogger("plotting.ffi")
@@ -133,8 +135,7 @@ def draw_moment_rate(problem, po):
outpath = os.path.join(
problem.outfolder,
po.figure_dir,
- "moment_rate_%i_%s_%s_%i.%s"
- % (stage.number, ns_str, llk_str, po.nensemble, po.outformat),
+ "moment_rate_%i_%s_%s_%i" % (stage.number, ns_str, llk_str, po.nensemble),
)
ref_mrf_rates, ref_mrf_times = fault.get_moment_rate_function(
@@ -144,44 +145,39 @@ def draw_moment_rate(problem, po):
store=sc.engine.get_store(target.store_id),
)
- if not os.path.exists(outpath) or po.force:
- fig, ax = plt.subplots(
- nrows=1, ncols=1, figsize=mpl_papersize("a7", "landscape")
- )
- labelpos = mpl_margins(
- fig, left=5, bottom=4, top=1.5, right=0.5, units=fontsize
- )
- labelpos(ax, 2.0, 1.5)
- if mtrace is not None:
- nchains = len(mtrace)
- csteps = float(nchains) / po.nensemble
- idxs = num.floor(num.arange(0, nchains, csteps)).astype("int32")
- mrfs_rate = []
- mrfs_time = []
- for idx in idxs:
- point = mtrace.point(idx=idx)
- mrf_rate, mrf_time = fault.get_moment_rate_function(
- index=ns,
- point=point,
- target=target,
- store=sc.engine.get_store(target.store_id),
- )
- mrfs_rate.append(mrf_rate)
- mrfs_time.append(mrf_time)
+ if plot_exists(outpath, po.outformat, po.force):
+ return
- fuzzy_moment_rate(ax, mrfs_rate, mrfs_time)
+ fig, ax = plt.subplots(
+ nrows=1, ncols=1, figsize=mpl_papersize("a7", "landscape")
+ )
+ labelpos = mpl_margins(
+ fig, left=5, bottom=4, top=1.5, right=0.5, units=fontsize
+ )
+ labelpos(ax, 2.0, 1.5)
+ if mtrace is not None:
+ nchains = len(mtrace)
+ csteps = float(nchains) / po.nensemble
+ idxs = num.floor(num.arange(0, nchains, csteps)).astype("int32")
+ mrfs_rate = []
+ mrfs_time = []
+ for idx in idxs:
+ point = mtrace.point(idx=idx)
+ mrf_rate, mrf_time = fault.get_moment_rate_function(
+ index=ns,
+ point=point,
+ target=target,
+ store=sc.engine.get_store(target.store_id),
+ )
+ mrfs_rate.append(mrf_rate)
+ mrfs_time.append(mrf_time)
- ax.plot(ref_mrf_times, ref_mrf_rates, "-k", alpha=0.8, linewidth=1.0)
- format_axes(ax, remove=["top", "right"])
+ fuzzy_moment_rate(ax, mrfs_rate, mrfs_time)
- if po.outformat == "display":
- plt.show()
- else:
- logger.info("saving figure to %s" % outpath)
- fig.savefig(outpath, format=po.outformat, dpi=po.dpi)
+ ax.plot(ref_mrf_times, ref_mrf_rates, "-k", alpha=0.8, linewidth=1.0)
+ format_axes(ax, remove=["top", "right"])
- else:
- logger.info("Plot exists! Use --force to overwrite!")
+ save_figs([fig], outpath, po.outformat, po.dpi)
def source_geometry(
@@ -206,7 +202,6 @@ def source_geometry(
of :class:'beat.sources.RectangularSource'
"""
- from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
alpha = 0.7
@@ -247,41 +242,12 @@ def plot_subfault(ax, source, color, refloc):
alpha=alpha,
)
- def set_axes_radius(ax, origin, radius, axes=["xyz"]):
- if "x" in axes:
- ax.set_xlim3d([origin[0] - radius, origin[0] + radius])
-
- if "y" in axes:
- ax.set_ylim3d([origin[1] - radius, origin[1] + radius])
-
- if "z" in axes:
- ax.set_zlim3d([origin[2] - radius, origin[2] + radius])
-
- def set_axes_equal(ax, axes="xyz"):
- """
- Make axes of 3D plot have equal scale so that spheres appear as
- spheres, cubes as cubes, etc..
- This is one possible solution to Matplotlib's
- ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
-
- Parameters
- ----------
- ax: a matplotlib axis, e.g., as output from plt.gca().
- """
-
- limits = num.array([ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()])
-
- origin = num.mean(limits, axis=1)
- radius = 0.5 * num.max(num.abs(limits[:, 1] - limits[:, 0]))
- set_axes_radius(ax, origin, radius, axes=axes)
-
fig = plt.figure(figsize=mpl_papersize("a5", "landscape"))
ax = fig.add_subplot(111, projection="3d")
extfs = fault.get_all_subfaults()
arr_coords = []
for idx, (refs, exts) in enumerate(zip(ref_sources, extfs)):
-
plot_subfault(ax, exts, color=mpl_graph_color(idx), refloc=event)
plot_subfault(ax, refs, color=scolor("aluminium4"), refloc=event)
for i, patch in enumerate(fault.get_subfault_patches(idx)):
@@ -310,9 +276,8 @@ def set_axes_equal(ax, axes="xyz"):
)
if values is not None:
-
if cmap is None:
- cmap = plt.cm.get_cmap("RdYlBu_r")
+ cmap = plt.get_cmap("RdYlBu_r")
poly_patches = Poly3DCollection(verts=arr_coords, zorder=1, cmap=cmap)
poly_patches.set_array(values)
@@ -350,13 +315,13 @@ def set_axes_equal(ax, axes="xyz"):
ax.set_zlabel("Depth [km]")
ax.set_ylabel("North_shift [km]")
ax.set_xlabel("East_shift [km]")
- set_axes_equal(ax, axes="xy")
+ set_axes_equal_3d(ax, axes="xy")
strikes = num.array([extf.strike for extf in extfs])
- dips = num.array([extf.strike for extf in extfs])
-
azim = strikes.mean() - 270
- elev = dips.mean()
+
+    # dips = num.array([extf.dip for extf in extfs])
+ # elev = dips.mean()
logger.debug("Viewing azimuth %s and elevation angles %s", azim, ax.elev)
ax.view_init(ax.elev, azim)
@@ -459,15 +424,12 @@ def draw_quivers(
normalisation=None,
zorder=0,
):
-
# positive uperp is always dip-normal- have to multiply -1
angles = num.arctan2(-uperp, uparr) * mt.r2d + rake
slips = num.sqrt((uperp**2 + uparr**2)).ravel()
if normalisation is None:
- from beat.models.laplacian import distances
-
- centers = num.vstack((xgr, ygr)).T
+ # centers = num.vstack((xgr, ygr)).T
# interpatch_dists = distances(centers, centers)
normalisation = slips.max()
@@ -496,17 +458,20 @@ def draw_quivers(
num.ceil(num.max(slips * normalisation) * 10.0) / 10.0
)
- # ax.quiverkey(
- # quivers, 0.9, 0.8, quiver_legend_length,
- # '{} [m]'.format(quiver_legend_length), labelpos='E',
- # coordinates='figure')
-
+ ax.quiverkey(
+ quivers,
+ 0.9,
+ 0.8,
+ quiver_legend_length,
+ "{} [m]".format(quiver_legend_length),
+ labelpos="E",
+ coordinates="figure",
+ )
return quivers, normalisation
def draw_patches(
ax, fault, subfault_idx, patch_values, cmap, alpha, cbounds=None, xlim=None
):
-
lls = fault.get_subfault_patch_attributes(
subfault_idx, attributes=["bottom_left"]
)
@@ -647,10 +612,10 @@ def get_values_from_trace(mtrace, fault, varname, reference):
# rupture durations
if False:
- durations = transform(
- mtrace.get_values("durations", combine=True, squeeze=True)
- )
- std_durations = durations.std(axis=0)
+ # durations = transform(
+ # mtrace.get_values("durations", combine=True, squeeze=True)
+ # )
+ # std_durations = durations.std(axis=0)
# alphas = std_durations.min() / std_durations
fig2, ax2 = plt.subplots(
@@ -778,7 +743,7 @@ def get_values_from_trace(mtrace, fault, varname, reference):
ygr,
ext_source.rake,
color="black",
- draw_legend=True,
+ draw_legend=False,
normalisation=normalisation,
zorder=3,
)
@@ -798,7 +763,6 @@ class ModeError(Exception):
def draw_slip_dist(problem, po):
-
mode = problem.config.problem_config.mode
if mode != ffi_mode_str:
@@ -827,31 +791,23 @@ def draw_slip_dist(problem, po):
mtrace = None
stage_number = -1
+ outpath = os.path.join(
+ problem.outfolder,
+ po.figure_dir,
+ "slip_dist_%i_%s_%i" % (stage_number, llk_str, po.nensemble),
+ )
+
+ if plot_exists(outpath, po.outformat, po.force):
+ return
+
figs, axs = fault_slip_distribution(
fault, mtrace, reference=reference, nensemble=po.nensemble
)
- if po.outformat == "display":
- plt.show()
- else:
- outpath = os.path.join(
- problem.outfolder,
- po.figure_dir,
- "slip_dist_%i_%s_%i" % (stage_number, llk_str, po.nensemble),
- )
-
- logger.info("Storing slip-distribution to: %s" % outpath)
- if po.outformat == "pdf":
- with PdfPages(outpath + ".pdf") as opdf:
- for fig in figs:
- opdf.savefig(fig, dpi=po.dpi)
- else:
- for i, fig in enumerate(figs):
- fig.savefig(outpath + "_%i.%s" % (i, po.outformat), dpi=po.dpi)
+ save_figs(figs, outpath, po.outformat, po.dpi)
def draw_3d_slip_distribution(problem, po):
-
varname_choices = ["coupling", "euler_slip", "slip_variation"]
if po.outformat == "svg":
@@ -859,7 +815,7 @@ def draw_3d_slip_distribution(problem, po):
mode = problem.config.problem_config.mode
- if mode != ffi_mode_str:
+ if mode not in [ffi_mode_str, bem_mode_str]:
raise ModeError(
"Wrong optimization mode: %s! This plot "
'variant is only valid for "%s" mode' % (mode, ffi_mode_str)
@@ -868,9 +824,10 @@ def draw_3d_slip_distribution(problem, po):
if po.load_stage is None:
po.load_stage = -1
- stage = load_stage(problem, stage_number=po.load_stage, load="trace", chains=[-1])
-
if not po.reference:
+ stage = load_stage(
+ problem, stage_number=po.load_stage, load="trace", chains=[-1]
+ )
reference = problem.config.problem_config.get_test_point()
res_point = get_result_point(stage.mtrace, po.post_llk)
reference.update(res_point)
@@ -882,7 +839,6 @@ def draw_3d_slip_distribution(problem, po):
mtrace = None
datatype, cconf = list(problem.composites.items())[0]
-
fault = cconf.load_fault_geometry()
if po.plot_projection in ["local", "latlon"]:
@@ -919,44 +875,48 @@ def draw_3d_slip_distribution(problem, po):
)
slip_units = "m"
+ if len(po.varnames) == 0:
+ varnames = None
+ else:
+ varnames = po.varnames
+
+ if len(po.varnames) == 1:
+ slip_label = po.varnames[0]
if po.varnames[0] == "slip_variation":
from pandas import read_csv
from beat.backend import extract_bounds_from_summary
summarydf = read_csv(
- os.path.join(problem.outfolder, "summary.txt"), sep="\s+"
+ os.path.join(problem.outfolder, "summary.txt"), sep=r"\s+"
)
bounds = extract_bounds_from_summary(
- summarydf, varname="uparr", shape=(fault.npatches,)
+ summarydf,
+ varname="uparr",
+ shape=(fault.npatches,),
+ alpha=0.06,
)
reference["slip_variation"] = bounds[1] - bounds[0]
slip_units = "m"
-
- if len(po.varnames) == 0:
- varnames = None
- else:
- varnames = po.varnames
-
- if len(po.varnames) == 1:
- slip_label = po.varnames[0]
else:
slip_label = "slip"
- if po.source_idxs is None:
- source_idxs = [0, fault.nsubfaults]
- else:
- source_idxs = po.source_idxs
-
- outpath = os.path.join(
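+    # The perspective string may contain "/"; replace it so it is safe to use in the filename.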
+ perspective_outstr = perspective.replace("/", "_")
+ basepath = os.path.join(
problem.outfolder,
po.figure_dir,
- "3d_%s_distribution_%i_%s_%i.%s"
- % (slip_label, po.load_stage, llk_str, po.nensemble, po.outformat),
+ "3d_%s_distribution_%i_%s_%i_%s"
+ % (slip_label, po.load_stage, llk_str, po.nensemble, perspective_outstr),
)
- if not os.path.exists(outpath) or po.force or po.outformat == "display":
- logger.info("Drawing 3d slip-distribution plot ...")
+ if plot_exists(basepath, po.outformat, po.force):
+ return
+
+ if mode == ffi_mode_str:
+ if po.source_idxs is None:
+ source_idxs = [0, fault.nsubfaults]
+ else:
+ source_idxs = po.source_idxs
gmt = slip_distribution_3d_gmt(
fault,
@@ -969,10 +929,25 @@ def draw_3d_slip_distribution(problem, po):
source_idxs=source_idxs,
)
+ outpath = f"{basepath}.{po.outformat}"
logger.info("saving figure to %s" % outpath)
gmt.save(outpath, resolution=300, size=10)
- else:
- logger.info("Plot exists! Use --force to overwrite!")
+ elif mode == bem_mode_str:
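+        # BEM mode: forward-model the reference point and plot slip on the
+        # discretized boundary-element sources.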
+ from .bem import slip_distribution_3d
+
+ composite = problem.composites["geodetic"]
+ composite.point2sources(reference)
+ response = composite.engine.process(
+ sources=composite.sources, targets=composite.targets
+ )
+
+ fig, _ = slip_distribution_3d(
+ response.discretized_sources,
+ response.source_slips(),
+ perspective=perspective,
+ debug=False,
+ )
+ save_figs([fig], basepath, po.outformat, po.dpi)
def slip_distribution_3d_gmt(
@@ -989,7 +964,6 @@ def slip_distribution_3d_gmt(
transparency=0,
source_idxs=None,
):
-
if len(gmtpy.detect_gmt_installations()) < 1:
raise gmtpy.GmtPyError("GMT needs to be installed for station_map plot!")
@@ -997,8 +971,8 @@ def slip_distribution_3d_gmt(
# bin_width = 1 # major grid and tick increment in [deg]
if gmt is None:
- font_size = 12
- font = "1"
+ # font_size = 12
+ # font = "1"
h = 15 # outsize in cm
w = 22
@@ -1107,7 +1081,7 @@ def slip_distribution_3d_gmt(
t=transparency,
W="0.1p",
p=p,
- *J
+ *J,
)
# add a colorbar
diff --git a/beat/plotting/geodetic.py b/beat/plotting/geodetic.py
index a921d096..df3da50c 100644
--- a/beat/plotting/geodetic.py
+++ b/beat/plotting/geodetic.py
@@ -1,39 +1,37 @@
import copy
import logging
-import math
import os
-from scipy import stats
-
import numpy as num
from matplotlib import pyplot as plt
-from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.patches import FancyArrow
from matplotlib.ticker import MaxNLocator
-from pymc3.plots.utils import make_2d
from pyrocko import gmtpy
from pyrocko import orthodrome as otd
from pyrocko.cake_plot import light
from pyrocko.cake_plot import str_to_mpl_color as scolor
-from pyrocko.plot import AutoScaler, mpl_graph_color, mpl_papersize, nice_value
+from pyrocko.plot import mpl_graph_color, mpl_papersize
+from scipy import stats
from beat import utility
from beat.config import ffi_mode_str
from beat.models import Stage
from .common import (
+ cbtick,
format_axes,
get_gmt_colorstring_from_mpl,
- get_latlon_ratio,
get_nice_plot_bounds,
get_result_point,
+ get_weights_point,
km,
- cbtick,
+ plot_covariances,
+ plot_exists,
plot_inset_hist,
+ save_figs,
scale_axes,
set_anchor,
- plot_covariances,
- get_weights_point,
+ set_locator_axes,
)
logger = logging.getLogger("plotting.geodetic")
@@ -43,8 +41,8 @@ def map_displacement_grid(displacements, scene):
arr = num.full_like(scene.displacement, fill_value=num.nan)
qt = scene.quadtree
- for syn_v, l in zip(displacements, qt.leaves):
- arr[l._slice_rows, l._slice_cols] = syn_v
+ for syn_v, leaf in zip(displacements, qt.leaves):
+ arr[leaf._slice_rows, leaf._slice_cols] = syn_v
arr[scene.displacement_mask] = num.nan
return arr
@@ -113,7 +111,6 @@ def shaded_displacements(
def gnss_fits(problem, stage, plot_options):
-
from pyrocko import automap
from pyrocko.model import gnss
@@ -179,7 +176,7 @@ def gnss_fits(problem, stage, plot_options):
dataset_to_result[dataset] = result
if po.plot_projection == "latlon":
- event = problem.config.event
+ # event = problem.config.event
locations = campaign.stations # + [event]
# print(locations)
# lat, lon = otd.geographic_midpoint_locations(locations)
@@ -320,7 +317,7 @@ def gnss_fits(problem, stage, plot_options):
W="0.1p,black",
G=color_str,
t=70,
- *m.jxyr
+ *m.jxyr,
)
m.gmt.psxy(in_rows=in_rows[0:2], W="1p,black", *m.jxyr)
else: # point source
@@ -331,7 +328,7 @@ def gnss_fits(problem, stage, plot_options):
G=color_str,
S="c%fp" % float(source.magnitude * source_scale_factor),
t=70,
- *m.jxyr
+ *m.jxyr,
)
if dataset:
@@ -341,7 +338,6 @@ def gnss_fits(problem, stage, plot_options):
from beat.models.corrections import StrainRateCorrection
for i, corr in enumerate(dataset.corrections):
-
if isinstance(corr, StrainRateCorrection):
lats, lons = corr.get_station_coordinates()
mid_lat, mid_lon = otd.geographic_midpoint(lats, lons)
@@ -355,7 +351,7 @@ def gnss_fits(problem, stage, plot_options):
S="x%f" % offset_scale,
A="9p+g%s+p1p" % color_str,
W=color_str,
- *m.jxyr
+ *m.jxyr,
)
m.draw_axes()
@@ -383,7 +379,7 @@ def gnss_fits(problem, stage, plot_options):
out_filename = "/tmp/histbounds.txt"
m.gmt.pshistogram(
- in_rows=make_2d(all_var_reductions[dataset.component]),
+ in_rows=num.atleast_2d(all_var_reductions[dataset.component]),
W=(imax - imin) / nbins,
out_filename=out_filename,
Z=Z,
@@ -407,13 +403,13 @@ def gnss_fits(problem, stage, plot_options):
] + jxyr
m.gmt.pshistogram(
- in_rows=make_2d(all_var_reductions[dataset.component]),
+ in_rows=num.atleast_2d(all_var_reductions[dataset.component]),
W=(imax - imin) / nbins,
G="lightorange",
Z=Z,
F=True,
L="0.5p,orange",
- *hist_args
+ *hist_args,
)
# plot vertical line on hist with best solution
@@ -423,7 +419,7 @@ def gnss_fits(problem, stage, plot_options):
[bvar_reductions_comp[dataset.component], po.nensemble],
),
W="1.5p,red",
- *jxyr
+ *jxyr,
)
figs.append(m)
@@ -432,7 +428,6 @@ def gnss_fits(problem, stage, plot_options):
def geodetic_covariances(problem, stage, plot_options):
-
datatype = "geodetic"
mode = problem.config.problem_config.mode
problem.init_hierarchicals()
@@ -440,7 +435,7 @@ def geodetic_covariances(problem, stage, plot_options):
po = plot_options
composite = problem.composites[datatype]
- event = composite.event
+ # event = composite.event
try:
sources = composite.sources
ref_sources = None
@@ -464,12 +459,12 @@ def geodetic_covariances(problem, stage, plot_options):
tpoint = get_weights_point(composite, bpoint, problem.config)
- bresults_tmp = composite.assemble_results(bpoint)
+ composite.assemble_results(bpoint)
composite.analyse_noise(tpoint)
covariances = [dataset.covariance for dataset in composite.datasets]
- figs, axs = plot_covariances(composite.datasets, covariances)
+ figs, _ = plot_covariances(composite.datasets, covariances)
return figs
@@ -483,8 +478,6 @@ def scene_fits(problem, stage, plot_options):
from kite.scene import Scene, UserIOWarning
from pyrocko.dataset import gshhg
- from beat.colormap import roma_colormap
-
try:
homepath = problem.config.geodetic_config.types["SAR"].datadir
except KeyError:
@@ -506,10 +499,15 @@ def scene_fits(problem, stage, plot_options):
composite = problem.composites[datatype]
event = composite.event
- try:
- sources = composite.sources
- ref_sources = None
- except AttributeError:
+
+ if po.reference:
+ bpoint = po.reference
+ else:
+ bpoint = get_result_point(stage.mtrace, po.post_llk)
+
+ bresults_tmp = composite.assemble_results(bpoint)
+
+ if mode == ffi_mode_str:
logger.info("FFI scene fit, using reference source ...")
ref_sources = composite.config.gf_config.reference_sources
set_anchor(ref_sources, anchor="top")
@@ -518,16 +516,9 @@ def scene_fits(problem, stage, plot_options):
datatype=datatype, component=composite.slip_varnames[0]
)
set_anchor(sources, anchor="top")
-
- if po.reference:
- if mode != ffi_mode_str:
- composite.point2sources(po.reference)
- ref_sources = copy.deepcopy(composite.sources)
- bpoint = po.reference
else:
- bpoint = get_result_point(stage.mtrace, po.post_llk)
-
- bresults_tmp = composite.assemble_results(bpoint)
+ sources = [source.clone() for source in composite.sources]
+ ref_sources = None
tpoint = get_weights_point(composite, bpoint, problem.config)
@@ -625,11 +616,10 @@ def scene_fits(problem, stage, plot_options):
ax_a = num.atleast_2d(ax)
axes.append(ax_a)
- nfigs = len(figures)
+ # nfigs = len(figures)
def axis_config(axes, source, scene, po):
-
- latlon_ratio = get_latlon_ratio(source.lat, source.lon)
+ latlon_ratio = 1.0 / num.cos(source.effective_lat * num.pi / 180.0)
for i, ax in enumerate(axes):
if po.plot_projection == "latlon":
ystr = "Latitude [deg]"
@@ -660,14 +650,17 @@ def axis_config(axes, source, scene, po):
else:
raise TypeError("Plot projection %s not available" % po.plot_projection)
- ax.xaxis.set_major_locator(MaxNLocator(nbins=3))
- ax.yaxis.set_major_locator(MaxNLocator(nbins=3))
+ set_locator_axes(ax.get_xaxis(), MaxNLocator(nbins=3))
+ set_locator_axes(ax.get_yaxis(), MaxNLocator(nbins=3))
if i == 0:
ax.set_ylabel(ystr, fontsize=fontsize)
ax.set_xlabel(xstr, fontsize=fontsize)
ax.set_yticklabels(ax.get_yticklabels(), rotation=90)
+ scale_x["precision"] = 2
+ scale_y["precision"] = 2
+
ax.scale_x = scale_x
ax.scale_y = scale_y
@@ -706,14 +699,12 @@ def draw_coastlines(ax, xlim, ylim, event, scene, po):
for p in polygons:
if p.is_land() or p.is_antarctic_grounding_line() or p.is_island_in_lake():
-
if scene.frame.isMeter():
ys, xs = otd.latlon_to_ne_numpy(
event.lat, event.lon, p.lats, p.lons
)
elif scene.frame.isDegree():
-
xs = p.lons - event.lon
ys = p.lats - event.lat
@@ -786,7 +777,6 @@ def draw_sources(ax, sources, scene, po, event, **kwargs):
bgcolor = kwargs.pop("color", None)
for i, source in enumerate(sources):
-
if scene.frame.isMeter():
fn, fe = source.outline(cs="xy").T
elif scene.frame.isDegree():
@@ -805,7 +795,14 @@ def draw_sources(ax, sources, scene, po, event, **kwargs):
ax.fill(
fe, fn, edgecolor=color, facecolor=light(color, 0.5), alpha=alpha
)
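+            # Highlight the upper edge of the source outline (first half of the outline points).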
- ax.plot(fe[0:2], fn[0:2], "-k", alpha=0.7, linewidth=1.0)
+ n_upper_edge_points = round(fn.size / 2.0)
+ ax.plot(
+ fe[0:n_upper_edge_points],
+ fn[0:n_upper_edge_points],
+ "-k",
+ alpha=0.7,
+ linewidth=1.0,
+ )
else:
ax.plot(fe, fn, marker="*", markersize=10, color=color, **kwargs)
@@ -851,12 +848,12 @@ def draw_sources(ax, sources, scene, po, event, **kwargs):
true, turN, tllE, tllN = zip(
*[
(
- l.gridE.max() - offset_e,
- l.gridN.max() - offset_n,
- l.gridE.min() - offset_e,
- l.gridN.min() - offset_n,
+ leaf.gridE.max() - offset_e,
+ leaf.gridN.max() - offset_n,
+ leaf.gridE.min() - offset_e,
+ leaf.gridN.min() - offset_n,
)
- for l in scene.quadtree.leaves
+ for leaf in scene.quadtree.leaves
]
)
@@ -928,7 +925,7 @@ def draw_sources(ax, sources, scene, po, event, **kwargs):
if stdz_residuals:
in_ax_res = plot_inset_hist(
axs[2],
- data=make_2d(stdz_residuals[dataset.name]),
+ data=num.atleast_2d(stdz_residuals[dataset.name]),
best_data=None,
linewidth=1.0,
bbox_to_anchor=(0.0, 0.775, 0.25, 0.225),
@@ -943,12 +940,12 @@ def draw_sources(ax, sources, scene, po, event, **kwargs):
format_axes(
in_ax_res, remove=["right", "bottom"], visible=True, linewidth=0.75
)
- in_ax_res.set_xlabel("std. res. [$\sigma$]", fontsize=fontsize - 3)
+ in_ax_res.set_xlabel(r"std. res. [$\sigma$]", fontsize=fontsize - 3)
if po.nensemble > 1:
in_ax = plot_inset_hist(
axs[2],
- data=make_2d(all_var_reductions[dataset.name]),
+ data=num.atleast_2d(all_var_reductions[dataset.name]),
best_data=bvar_reductions[dataset.name] * 100.0,
linewidth=1.0,
bbox_to_anchor=(0.75, 0.775, 0.25, 0.225),
@@ -1041,7 +1038,7 @@ def draw_sources(ax, sources, scene, po, event, **kwargs):
orientation="horizontal",
)
if po.plot_projection == "individual":
- cblabel = "standard dev [$\sigma$]"
+ cblabel = r"standard dev [$\sigma$]"
cbr.set_label(cblabel, fontsize=fontsize)
@@ -1055,7 +1052,6 @@ def draw_sources(ax, sources, scene, po, event, **kwargs):
def draw_geodetic_covariances(problem, plot_options):
-
if "geodetic" not in list(problem.composites.keys()):
raise TypeError("No geodetic composite defined in the problem!")
@@ -1087,27 +1083,14 @@ def draw_geodetic_covariances(problem, plot_options):
"geodetic_covs_%s_%s" % (stage.number, llk_str),
)
- if not os.path.exists(outpath + ".%s" % po.outformat) or po.force:
- figs = geodetic_covariances(problem, stage, po)
- else:
- logger.info("geodetic covariances plots exist. Use force=True for replotting!")
+ if plot_exists(outpath, po.outformat, po.force):
return
- if po.outformat == "display":
- plt.show()
- else:
- logger.info("saving figures to %s" % outpath)
- if po.outformat == "pdf":
- with PdfPages(outpath + ".pdf") as opdf:
- for fig in figs:
- opdf.savefig(fig)
- else:
- for i, fig in enumerate(figs):
- fig.savefig("%s_%i.%s" % (outpath, i, po.outformat), dpi=po.dpi)
+ figs = geodetic_covariances(problem, stage, po)
+ save_figs(figs, outpath, po.outformat, po.dpi)
def draw_scene_fits(problem, plot_options):
-
if "geodetic" not in list(problem.composites.keys()):
raise TypeError("No geodetic composite defined in the problem!")
@@ -1143,27 +1126,14 @@ def draw_scene_fits(problem, plot_options):
% (stage.number, llk_str, po.plot_projection, po.nensemble),
)
- if not os.path.exists(outpath + ".%s" % po.outformat) or po.force:
- figs = scene_fits(problem, stage, po)
- else:
- logger.info("scene plots exist. Use force=True for replotting!")
+ if plot_exists(outpath, po.outformat, po.force):
return
- if po.outformat == "display":
- plt.show()
- else:
- logger.info("saving figures to %s" % outpath)
- if po.outformat == "pdf":
- with PdfPages(outpath + ".pdf") as opdf:
- for fig in figs:
- opdf.savefig(fig)
- else:
- for i, fig in enumerate(figs):
- fig.savefig("%s_%i.%s" % (outpath, i, po.outformat), dpi=po.dpi)
+ figs = scene_fits(problem, stage, po)
+ save_figs(figs, outpath, po.outformat, po.dpi)
def draw_gnss_fits(problem, plot_options):
-
if "geodetic" not in list(problem.composites.keys()):
raise TypeError("No geodetic composite defined in the problem!")
@@ -1199,12 +1169,11 @@ def draw_gnss_fits(problem, plot_options):
"gnss_%s_%s_%i_%s" % (stage.number, llk_str, po.nensemble, po.plot_projection),
)
- if not os.path.exists(outpath) or po.force:
- figs = gnss_fits(problem, stage, po)
- else:
- logger.info("scene plots exist. Use force=True for replotting!")
+ if plot_exists(outpath, po.outformat, po.force):
return
+ figs = gnss_fits(problem, stage, po)
+
if po.outformat == "display":
plt.show()
else:
diff --git a/beat/plotting/marginals.py b/beat/plotting/marginals.py
index 9fc263a3..234cea4d 100644
--- a/beat/plotting/marginals.py
+++ b/beat/plotting/marginals.py
@@ -3,29 +3,26 @@
import os
import numpy as num
+from arviz import plot_density
from matplotlib import pyplot as plt
from matplotlib.ticker import MaxNLocator
-from matplotlib.backends.backend_pdf import PdfPages
-
-from pymc3 import plots as pmp
-from pymc3 import quantiles
from pyrocko.cake_plot import str_to_mpl_color as scolor
from pyrocko.plot import AutoScaler, mpl_graph_color, mpl_papersize, nice_value
-from scipy.stats import kde
from beat import utility
-from beat.config import dist_vars, geometry_mode_str
-from beat.heart import physical_bounds
+from beat.config import bem_mode_str, dist_vars, geometry_mode_str
+from beat.defaults import hypername
+from beat.heart import defaults
from beat.models import Stage, load_stage
from .common import (
format_axes,
get_result_point,
+ get_transform,
histplot_op,
- hypername,
kde2plot,
- plot_units,
- get_transform,
+ plot_exists,
+ save_figs,
)
logger = logging.getLogger("plotting.marginals")
@@ -107,7 +104,7 @@ def apply_unified_axis(
)
# check physical bounds if passed truncate
- phys_min, phys_max = physical_bounds[v]
+ phys_min, phys_max = defaults[v].physical_bounds
if min < phys_min:
min = phys_min
if max > phys_max:
@@ -194,27 +191,24 @@ def traceplot(
kwargs : dict
for histplot op
qlist : list
- of quantiles to plot. Default: (all, 0., 100.)
+        of lower/upper percentiles bounding the plotted range. Default: (0.01, 99.99), i.e. almost the full range
Returns
-------
ax : matplotlib axes
"""
- ntickmarks = 2
fontsize = 10
ntickmarks_max = kwargs.pop("ntickmarks_max", 3)
scale_factor = kwargs.pop("scale_factor", 2 / 3)
- lines_color = kwargs.pop("lines_color", "red")
num.set_printoptions(precision=3)
def make_bins(data, nbins=40, qlist=None):
- d = data.flatten()
+ d = data.ravel()
if qlist is not None:
- qu = quantiles(d, qlist=qlist)
- mind = qu[qlist[0]]
- maxd = qu[qlist[-1]]
+ qu = num.percentile(d, q=qlist)
+ mind, maxd = qu[0], qu[-1]
else:
mind = d.min()
maxd = d.max()
@@ -235,8 +229,9 @@ def remove_var(varnames, varname):
if posterior != "None":
llk = trace.get_values("like", combine=combined, chains=chains, squeeze=False)
+
llk = num.squeeze(llk[0])
- llk = pmp.utils.make_2d(llk)
+ llk = num.atleast_2d(llk)
posterior_idxs = utility.get_fit_indexes(llk)
@@ -285,7 +280,6 @@ def remove_var(varnames, varname):
var_idx = 0
varname_page_idx = 0
for nsubplots in nsubplots_page:
-
width, height = mpl_papersize("a4", "portrait")
height_subplot = height / nrow_max
nrow = int(num.ceil(nsubplots / ncol))
@@ -294,7 +288,6 @@ def remove_var(varnames, varname):
axs = num.atleast_2d(axs)
for i in range(nsubplots):
-
coli, rowi = utility.mod_i(i, nrow)
ax = axs[rowi, coli]
@@ -318,7 +311,6 @@ def remove_var(varnames, varname):
plot_name, transform = get_transform(v)
d = transform(d)
# iterate over columns in case varsize > 1
-
if v in dist_vars:
if source_idxs is None:
source_idx_step = int(num.floor(d.shape[1] / 6))
@@ -349,12 +341,12 @@ def remove_var(varnames, varname):
selected = num.vstack(selected)
else:
- selected = d.T
+ selected = num.atleast_2d(d.T)
nsources = selected.shape[0]
logger.debug("Number of sources: %i" % nsources)
for isource, e in enumerate(selected):
- e = pmp.utils.make_2d(e)
+ e = num.atleast_2d(e)
if make_bins_flag:
varbin = make_bins(e, nbins=nbins, qlist=qlist)
varbins.append(varbin)
@@ -378,13 +370,15 @@ def remove_var(varnames, varname):
pcolor = color
if plot_style == "kde":
- pmp.kdeplot(
+ plot_density(
e,
shade=alpha,
ax=ax,
- color=pcolor,
- linewidth=1.0,
- kwargs_shade={"color": pcolor},
+ colors=[pcolor],
+ backend="matplotlib",
+ backend_kwargs={
+ "linewidth": 1.0,
+ },
)
ax.relim()
ax.autoscale(tight=False)
@@ -394,7 +388,6 @@ def remove_var(varnames, varname):
xticker = MaxNLocator(nbins=5)
xax.set_major_locator(xticker)
elif plot_style in ["pdf", "cdf"]:
-
kwargs["label"] = source_idxs
# following determine quantile annotations in cdf
kwargs["nsources"] = nsources
@@ -419,6 +412,7 @@ def remove_var(varnames, varname):
'Plot style "%s" not implemented' % plot_style
)
+ plot_unit = defaults[hypername(plot_name)].unit
try:
param = prior_bounds[v]
@@ -429,9 +423,7 @@ def remove_var(varnames, varname):
except IndexError:
lower, upper = param.lower, param.upper
- title = "{} {}".format(
- v, plot_units[hypername(plot_name)]
- )
+ title = "{} {}".format(v, plot_unit)
else:
lower = num.array2string(param.lower, separator=",")[
1:-1
@@ -442,7 +434,7 @@ def remove_var(varnames, varname):
title = "{} {} \npriors: ({}; {})".format(
plot_name,
- plot_units[hypername(plot_name)],
+ plot_unit,
lower,
upper,
)
@@ -450,9 +442,7 @@ def remove_var(varnames, varname):
try:
title = "{} {}".format(plot_name, float(lines[v]))
except KeyError:
- title = "{} {}".format(
- plot_name, plot_units[hypername(plot_name)]
- )
+ title = "{} {}".format(plot_name, plot_unit)
axs[rowi, coli].set_xlabel(title, fontsize=fontsize)
if nvar == 1:
@@ -465,27 +455,27 @@ def remove_var(varnames, varname):
ax.get_yaxis().set_visible(False)
format_axes(axs[rowi, coli])
ax.tick_params(axis="x", labelsize=fontsize)
- # axs[rowi, coli].set_ylabel("Frequency")
if lines:
try:
- ax.axvline(x=lines[v], color="white", lw=1.0)
- ax.axvline(
- x=lines[v],
- color="black",
- linestyle="dashed",
- lw=1.0,
- )
+ for line in lines[v]:
+ ax.axvline(x=line, color="white", lw=1.0)
+ ax.axvline(
+ x=line,
+ color="black",
+ linestyle="dashed",
+ lw=1.0,
+ )
except KeyError:
pass
if posterior != "None":
if posterior == "all":
for k, idx in posterior_idxs.items():
- ax.axvline(x=e[idx], color=colors[k], lw=1.0)
+ ax.axvline(x=e[:, idx], color=colors[k], lw=1.0)
else:
idx = posterior_idxs[posterior]
- ax.axvline(x=e[idx], color=pcolor, lw=1.0)
+ ax.axvline(x=e[:, idx], color=pcolor, lw=1.0)
if unify:
page_varnames = varnames[varname_page_idx : varname_page_idx + nsubplots]
@@ -523,7 +513,7 @@ def correlation_plot(
Parameters
----------
- mtrace : :class:`pymc3.base.MutliTrace`
+    mtrace : :class:`pymc.backends.base.MultiTrace`
Mutlitrace instance containing the sampling results
varnames : list of variable names
Variables to be plotted, if None all variable are plotted
@@ -574,33 +564,36 @@ def correlation_plot(
d[var] = vals
- for k in range(nvar - 1):
- a = d[varnames[k]]
- for l in range(k + 1, nvar):
- logger.debug("%s, %s" % (varnames[k], varnames[l]))
- b = d[varnames[l]]
+ for i_k in range(nvar - 1):
+ varname_a = varnames[i_k]
+ a = d[varname_a]
+ for i_l in range(i_k + 1, nvar):
+ ax = axs[i_l - 1, i_k]
+ varname_b = varnames[i_l]
+ logger.debug("%s, %s" % (varname_a, varname_b))
+ b = d[varname_b]
- kde2plot(a, b, grid=grid, ax=axs[l - 1, k], cmap=cmap, aspect="auto")
+ kde2plot(a, b, grid=grid, ax=ax, cmap=cmap, aspect="auto")
if point is not None:
- axs[l - 1, k].plot(
- point[varnames[k]],
- point[varnames[l]],
+ ax.plot(
+ point[varnames[i_k]],
+ point[varnames[i_l]],
color=point_color,
marker=point_style,
markersize=point_size,
)
- axs[l - 1, k].tick_params(direction="in")
+ ax.tick_params(direction="in")
- if k == 0:
- axs[l - 1, k].set_ylabel(varnames[l])
+ if i_k == 0:
+ ax.set_ylabel(varname_b)
- axs[l - 1, k].set_xlabel(varnames[k])
+ axs[i_l - 1, i_k].set_xlabel(varname_a)
- for k in range(nvar - 1):
- for l in range(k):
- fig.delaxes(axs[l, k])
+ for i_k in range(nvar - 1):
+ for i_l in range(i_k):
+ fig.delaxes(axs[i_l, i_k])
fig.tight_layout()
fig.subplots_adjust(wspace=0.05, hspace=0.05)
@@ -630,7 +623,7 @@ def correlation_plot_hist(
Parameters
----------
- mtrace : :class:`pymc3.base.MutliTrace`
+ mtrace : :class:`pymc.backends.base.MultiTrace`
Mutlitrace instance containing the sampling results
varnames : list of variable names
Variables to be plotted, if None all variable are plotted
@@ -667,6 +660,7 @@ def correlation_plot_hist(
label_pad = 25
logger.info("Drawing correlation figure ...")
+    logger.warning("Does NOT separate parameters correctly for Mixed Type Setups!")
if varnames is None:
varnames = mtrace.varnames
@@ -685,7 +679,12 @@ def correlation_plot_hist(
mtrace.get_values(var, chains=chains, combine=True, squeeze=True)
)
- _, nvar_elements = vals.shape
+ logger.info("Getting data for `%s` from sampled trace." % var)
+ try:
+ _, nvar_elements = vals.shape
+    except ValueError: # for variables with dim=1
+ nvar_elements = 1
+ vals = num.atleast_2d(vals).T
d[var] = vals
@@ -704,17 +703,17 @@ def correlation_plot_hist(
else:
pcolor = hist_color
- for k in range(nvar):
- v_namea = varnames[k]
+ for i_k in range(nvar):
+ v_namea = varnames[i_k]
a = d[v_namea][:, source_i]
- for l in range(k, nvar):
- ax = axs[l, k]
- v_nameb = varnames[l]
+ for i_l in range(i_k, nvar):
+ ax = axs[i_l, i_k]
+ v_nameb = varnames[i_l]
plot_name_a, transform_a = get_transform(v_namea)
plot_name_b, transform_b = get_transform(v_nameb)
logger.debug("%s, %s" % (v_namea, v_nameb))
- if l == k:
+ if i_l == i_k:
if point is not None:
if v_namea in point.keys():
reference = transform_a(point[v_namea][source_i])
@@ -728,7 +727,7 @@ def correlation_plot_hist(
histplot_op(
ax,
- pmp.utils.make_2d(a),
+ num.atleast_2d(a),
alpha=alpha,
color=pcolor,
tstd=0.0,
@@ -769,15 +768,15 @@ def correlation_plot_hist(
yax = ax.get_yaxis()
yax.set_major_locator(yticker)
- if l != nvar - 1:
+ if i_l != nvar - 1:
ax.get_xaxis().set_ticklabels([])
- if k == 0:
+ if i_k == 0:
ax.set_ylabel(
- plot_name_b + "\n " + plot_units[hypername(plot_name_b)],
+ plot_name_b + "\n " + defaults[hypername(plot_name_b)].unit,
fontsize=fontsize,
)
- if utility.is_odd(l):
+ if utility.is_odd(i_l):
ax.tick_params(axis="y", pad=label_pad)
else:
ax.get_yaxis().set_ticklabels([])
@@ -787,16 +786,16 @@ def correlation_plot_hist(
try: # matplotlib version issue workaround
ax.tick_params(axis="both", labelrotation=50.0)
except Exception:
- ax.set_xticklabels(axs[l, k].get_xticklabels(), rotation=50)
- ax.set_yticklabels(axs[l, k].get_yticklabels(), rotation=50)
+ ax.set_xticklabels(axs[i_l, i_k].get_xticklabels(), rotation=50)
+ ax.set_yticklabels(axs[i_l, i_k].get_yticklabels(), rotation=50)
- if utility.is_odd(k):
+ if utility.is_odd(i_k):
ax.tick_params(axis="x", pad=label_pad)
# put transformed varname back to varnames for unification
# varnames[k] = plot_name_a
ax.set_xlabel(
- plot_name_a + "\n " + plot_units[hypername(plot_name_a)],
+ plot_name_a + "\n " + defaults[hypername(plot_name_a)].unit,
fontsize=fontsize,
)
@@ -826,13 +825,13 @@ def correlation_plot_hist(
ntickmarks_max=ntickmarks_max,
)
- for k in range(nvar):
+ for i_k in range(nvar):
if unify:
# reset histogram ylims after unify
- axs[k, k].set_ylim(hist_ylims[k])
+ axs[i_k, i_k].set_ylim(hist_ylims[i_k])
- for l in range(k):
- fig.delaxes(axs[l, k])
+ for i_l in range(i_k):
+ fig.delaxes(axs[i_l, i_k])
fig.tight_layout()
fig.subplots_adjust(wspace=0.05, hspace=0.05)
@@ -893,68 +892,57 @@ def draw_posteriors(problem, plot_options):
else:
sidxs = ""
- outpath_tmp = os.path.join(
+ outpath = os.path.join(
problem.outfolder,
po.figure_dir,
"stage_%i_%s_%s_%s" % (s, sidxs, po.post_llk, plot_style),
)
- if not os.path.exists(outpath_tmp + ".%s" % po.outformat) or po.force:
- logger.info("plotting stage: %s" % stage.handler.stage_path(s))
- stage.load_results(
- varnames=problem.varnames,
- model=problem.model,
- stage_number=s,
- load="trace",
- chains=[-1],
- )
+ if plot_exists(outpath, po.outformat, po.force):
+ return
- prior_bounds = {}
- prior_bounds.update(**pc.hyperparameters)
- prior_bounds.update(**pc.hierarchicals)
- prior_bounds.update(**pc.priors)
-
- figs, _, _ = traceplot(
- stage.mtrace,
- varnames=varnames,
- chains=None,
- combined=True,
- source_idxs=po.source_idxs,
- plot_style=plot_style,
- lines=po.reference,
- posterior=po.post_llk,
- prior_bounds=prior_bounds,
- nbins=nbins,
- )
+ logger.info("plotting stage: %s" % stage.handler.stage_path(s))
+ stage.load_results(
+ varnames=problem.varnames,
+ model=problem.model,
+ stage_number=s,
+ load="trace",
+ chains=[-1],
+ )
- if po.outformat == "display":
- plt.show()
- else:
- logger.info("saving figures to %s" % outpath_tmp)
- if po.outformat == "pdf":
- with PdfPages(outpath_tmp + ".pdf") as opdf:
- for fig in figs:
- opdf.savefig(fig)
- else:
- for i, fig in enumerate(figs):
- outpath = "%s_%i.%s" % (outpath_tmp, i, po.outformat)
- logger.info("saving figure to %s" % outpath)
- fig.savefig(outpath, format=po.outformat, dpi=po.dpi)
+ prior_bounds = {}
+ prior_bounds.update(**pc.hyperparameters)
+ prior_bounds.update(**pc.hierarchicals)
+ prior_bounds.update(**pc.priors)
- else:
- logger.info("plot for stage %s exists. Use force=True for replotting!" % s)
+ figs, _, _ = traceplot(
+ stage.mtrace,
+ varnames=varnames,
+ chains=None,
+ combined=True,
+ source_idxs=po.source_idxs,
+ plot_style=plot_style,
+ lines=po.reference,
+ posterior=po.post_llk,
+ prior_bounds=prior_bounds,
+ nbins=nbins,
+ )
+
+ save_figs(figs, outpath, po.outformat, po.dpi)
def draw_correlation_hist(problem, plot_options):
"""
- Draw parameter correlation plot and histograms from the final atmip stage.
+ Draw parameter correlation plot and histograms for a model result ensemble.
Only feasible for 'geometry' problem.
"""
po = plot_options
mode = problem.config.problem_config.mode
- assert mode == geometry_mode_str
+ if mode not in [geometry_mode_str, bem_mode_str]:
+ raise NotImplementedError(f"The correlation plot is not implemented for {mode}")
+
assert po.load_stage != 0
hypers = utility.check_hyper_flag(problem)
@@ -962,7 +950,7 @@ def draw_correlation_hist(problem, plot_options):
if hypers:
varnames = problem.hypernames
else:
- varnames = list(problem.varnames) + problem.hypernames + ["like"]
+ varnames = list(problem.varnames)
if len(po.varnames) > 0:
varnames = po.varnames
@@ -990,28 +978,16 @@ def draw_correlation_hist(problem, plot_options):
"corr_hist_%s_%s" % (stage.number, llk_str),
)
- if not os.path.exists(outpath) or po.force:
- figs, _ = correlation_plot_hist(
- mtrace=stage.mtrace,
- varnames=varnames,
- cmap=plt.cm.gist_earth_r,
- chains=None,
- point=reference,
- point_size=6,
- point_color="red",
- )
- else:
- logger.info("correlation plot exists. Use force=True for replotting!")
+ if plot_exists(outpath, po.outformat, po.force):
return
- if po.outformat == "display":
- plt.show()
- else:
- logger.info("saving figures to %s" % outpath)
- if po.outformat == "pdf":
- with PdfPages(outpath + ".pdf") as opdf:
- for fig in figs:
- opdf.savefig(fig)
- else:
- for i, fig in enumerate(figs):
- fig.savefig("%s_%i.%s" % (outpath, i, po.outformat), dpi=po.dpi)
+ figs, _ = correlation_plot_hist(
+ mtrace=stage.mtrace,
+ varnames=varnames,
+ cmap=plt.cm.gist_earth_r,
+ chains=None,
+ point=reference,
+ point_size=6,
+ point_color="red",
+ )
+ save_figs(figs, outpath, po.outformat, po.dpi)
diff --git a/beat/plotting/seismic.py b/beat/plotting/seismic.py
index a1b7b68f..4b827831 100644
--- a/beat/plotting/seismic.py
+++ b/beat/plotting/seismic.py
@@ -1,15 +1,9 @@
import logging
import os
-from scipy import stats
-
-from tqdm import tqdm
-
import numpy as num
from matplotlib import pyplot as plt
-from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.ticker import MaxNLocator
-from pymc3.plots.utils import make_2d
from pyrocko import gmtpy, trace
from pyrocko.cake_plot import str_to_mpl_color as scolor
from pyrocko.guts import load
@@ -21,6 +15,8 @@
mpl_margins,
mpl_papersize,
)
+from scipy import stats
+from tqdm import tqdm
from beat import utility
from beat.heart import calculate_radiation_weights
@@ -29,16 +25,19 @@
from .common import (
draw_line_on_array,
format_axes,
- hide_ticks,
get_gmt_config,
+ get_llk_idx_to_trace,
get_result_point,
+ get_weights_point,
+ hide_ticks,
+ hist2d_plot_op,
+ plot_exists,
plot_inset_hist,
+ save_figs,
spherical_kde_op,
str_dist,
str_duration,
str_unit,
- get_weights_point,
- hist2d_plot_op,
)
km = 1000.0
@@ -48,6 +47,10 @@
logger = logging.getLogger("plotting.seismic")
+def skey(tr):
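+    # Key function replacing the former inline lambdas: group traces by channel
+    # for pyrocko.trace.minmax/minmaxtime.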
+ return tr.channel
+
+
def n_model_plot(models, axes=None, draw_bg=True, highlightidx=[]):
"""
Plot cake layered earth models.
@@ -111,7 +114,6 @@ def plot_profile(mod, axes, vp_c, vs_c, lw=0.5):
def load_earthmodels(store_superdir, store_ids, depth_max="cmb"):
-
ems = []
emr = []
for store_id in store_ids:
@@ -127,13 +129,11 @@ def load_earthmodels(store_superdir, store_ids, depth_max="cmb"):
def draw_earthmodels(problem, plot_options):
-
from beat.heart import init_geodetic_targets, init_seismic_targets
po = plot_options
for datatype, composite in problem.composites.items():
-
if datatype == "seismic":
models_dict = {}
sc = problem.config.seismic_config
@@ -197,15 +197,17 @@ def draw_earthmodels(problem, plot_options):
if not os.path.exists(outpath) or po.force:
targets = init_geodetic_targets(
datasets=composite.datasets,
+ event=problem.config.event,
earth_model_name=gc.gf_config.earth_model_name,
interpolation="multilinear",
crust_inds=list(range(*gc.gf_config.n_variations)),
sample_rate=gc.gf_config.sample_rate,
)
+ store_ids = [t.store_id for t in targets]
models = load_earthmodels(
store_superdir=composite.engine.store_superdirs[0],
- targets=targets,
+ store_ids=store_ids,
depth_max=gc.gf_config.source_depth_max * km,
)
models_dict[outpath] = models[0] # select only source site
@@ -279,7 +281,6 @@ def fuzzy_waveforms(
if extent is None:
key = traces[0].channel
- skey = lambda tr: tr.channel
ymin, ymax = trace.minmax(traces, key=skey)[key]
xmin, xmax = trace.minmaxtime(traces, key=skey)[key]
@@ -292,7 +293,6 @@ def fuzzy_waveforms(
grid = num.zeros(grid_size, dtype="float64")
for tr in traces:
-
draw_line_on_array(
tr.get_xdata(),
tr.ydata,
@@ -334,7 +334,6 @@ def fuzzy_spectrum(
cmap=None,
alpha=0.5,
):
-
if cmap is None:
cmap = get_fuzzy_cmap()
@@ -343,7 +342,6 @@ def fuzzy_spectrum(
if extent is None:
key = traces[0].channel
- skey = lambda tr: tr.channel
ymin, ymax = trace.minmax(traces, key=skey)[key]
@@ -352,10 +350,7 @@ def fuzzy_spectrum(
)
extent = [*taper_frequencies, 0, ypad_factor * ymax]
- else:
- lower_idx, upper_idx = 0, -1
- # fxdata = fxdata[lower_idx:upper_idx]
for tr in traces:
ydata = zero_pad_spectrum(tr)
draw_line_on_array(
@@ -400,8 +395,7 @@ def extract_time_shifts(point, hierarchicals, wmap):
def form_result_ensemble(
stage, composite, nensemble, chop_bounds, target_index, bresults, bvar_reductions
):
-
- if nensemble > 1:
+ if nensemble > 0:
logger.info("Collecting ensemble of %i synthetic waveforms ..." % nensemble)
nchains = len(stage.mtrace)
csteps = float(nchains) / nensemble
@@ -444,7 +438,7 @@ def form_result_ensemble(
target_synths.append(bresults[i].processed_syn)
target_var_reductions.append(bvar_reductions[nslcd_id_str])
- if nensemble > 1:
+ if nensemble > 0:
for results, var_reductions in zip(ens_results, ens_var_reductions):
# put all results per target here not only single
@@ -491,7 +485,6 @@ def plot_taper(axes, t, taper, mode="geometry", **kwargs):
t2 = num.concatenate((t, t[::-1]))
axes.fill(t2, y2, **kwargs)
- skey = lambda tr: tr.channel
inset_axs_width, inset_axs_height = 0.2, 0.18
plot_taper(
@@ -517,7 +510,7 @@ def plot_taper(axes, t, taper, mode="geometry", **kwargs):
in_ax = plot_inset_hist(
axes,
- data=make_2d(var_reductions),
+ data=num.atleast_2d(var_reductions),
best_data=best_data,
bbox_to_anchor=(0.9, 0.75, inset_axs_width, inset_axs_height),
background_alpha=0.7,
@@ -527,7 +520,7 @@ def plot_taper(axes, t, taper, mode="geometry", **kwargs):
# histogram of stdz residual
in_ax_res = plot_inset_hist(
axes,
- data=make_2d(stdz_residual),
+ data=num.atleast_2d(stdz_residual),
best_data=None,
bbox_to_anchor=(0.65, 0.75, inset_axs_width, inset_axs_height),
color="grey",
@@ -537,7 +530,7 @@ def plot_taper(axes, t, taper, mode="geometry", **kwargs):
x = num.linspace(*stats.norm.ppf((0.001, 0.999)), 100)
gauss = stats.norm.pdf(x)
in_ax_res.plot(x, gauss, "k-", lw=0.5, alpha=0.8)
- in_ax_res.set_title("std. res. [$\sigma$]", fontsize=5)
+ in_ax_res.set_title(r"std. res. [$\sigma$]", fontsize=5)
if synth_plot_flag:
# only plot if highlighted point exists
@@ -568,10 +561,10 @@ def plot_taper(axes, t, taper, mode="geometry", **kwargs):
if po.nensemble > 1:
in_ax = plot_inset_hist(
axes,
- data=make_2d(time_shifts),
+ data=num.atleast_2d(time_shifts),
best_data=best_data,
bbox_to_anchor=(-0.0985, 0.16, inset_axs_width, inset_axs_height),
- # cmap=plt.cm.get_cmap('seismic'),
+ # cmap=plt.get_cmap('seismic'),
# cbounds=time_shift_bounds,
color=time_shift_color,
alpha=0.7,
@@ -590,19 +583,18 @@ def plot_taper(axes, t, taper, mode="geometry", **kwargs):
(
tmarks[0],
ytmarks[0],
- "$\,$ " + str_duration(tmarks[0] - source.time),
+ r"$\,$ " + str_duration(tmarks[0] - source.time),
hor_alignment,
"bottom",
),
(
tmarks[1],
ytmarks[1],
- "$\Delta$ " + str_duration(tmarks[1] - tmarks[0]),
+ r"$\Delta$ " + str_duration(tmarks[1] - tmarks[0]),
"center",
"bottom",
),
]:
-
axes2.annotate(
text,
xy=(xtmark, ytmark),
@@ -651,7 +643,6 @@ def subplot_spectrum(
tap_color_annot,
ypad_factor,
):
-
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
inset_axs_width, inset_axs_height = 0.2, 0.18
@@ -692,7 +683,7 @@ def subplot_spectrum(
in_ax = plot_inset_hist(
axes2,
- data=make_2d(var_reductions),
+ data=num.atleast_2d(var_reductions),
best_data=best_data,
bbox_to_anchor=(0.9, bbox_y, inset_axs_width, inset_axs_height),
)
@@ -701,7 +692,7 @@ def subplot_spectrum(
# histogram of stdz residual
in_ax_res = plot_inset_hist(
axes2,
- data=make_2d(stdz_residual),
+ data=num.atleast_2d(stdz_residual),
best_data=None,
bbox_to_anchor=(0.65, bbox_y, inset_axs_width, inset_axs_height),
color="grey",
@@ -711,7 +702,7 @@ def subplot_spectrum(
x = num.linspace(*stats.norm.ppf((0.001, 0.999)), 100)
gauss = stats.norm.pdf(x)
in_ax_res.plot(x, gauss, "k-", lw=0.5, alpha=0.8)
- in_ax_res.set_title("spc. std. res. [$\sigma$]", fontsize=5)
+ in_ax_res.set_title(r"spc. std. res. [$\sigma$]", fontsize=5)
fxdata = result.processed_syn.get_xdata()
@@ -719,7 +710,6 @@ def subplot_spectrum(
colors = [obs_color, syn_color, misfit_color]
ymaxs = []
for attr_suffix, lw, color in zip(["obs", "syn", "res"], linewidths, colors):
-
tr = getattr(result, "processed_{}".format(attr_suffix))
ydata = zero_pad_spectrum(tr)
ymaxs.append(ydata.max())
@@ -765,7 +755,7 @@ def subplot_spectrum(
)
axes.annotate(
- "$ f \ |\ ^{%0.2g}_{%0.2g} \ $" % (fxdata[0], xpos),
+ r"$ f \ |\ ^{%0.2g}_{%0.2g} \ $" % (fxdata[0], xpos),
xycoords="data",
xy=(xpos, ymax_factor_f * ymax),
xytext=(1.0, 1.0),
@@ -914,12 +904,10 @@ def seismic_fits(problem, stage, plot_options):
)
cg_to_target_codes = utility.gather(unique_target_codes, lambda t: t[3])
cgs = cg_to_target_codes.keys()
- target_domains = list(utility.gather(event_targets, lambda t: t.domain).keys())
+ # target_domains = list(utility.gather(event_targets, lambda t: t.domain).keys())
channel_index = dict((channel, i) for (i, channel) in enumerate(cgs))
- skey = lambda tr: tr.channel
-
figs = []
logger.info("Plotting waveforms ... for event number: %i" % event_idx)
logger.info(event.__str__())
@@ -933,16 +921,20 @@ def seismic_fits(problem, stage, plot_options):
dist = source.distance_to(target)
data.append(dist)
- dists = num.array(data, dtype=num.float)
+ dists = num.array(data, dtype=num.float64)
iorder = num.argsort(dists)
ns_id_codes_sorted = [list(ns_id_to_target_codes.keys())[ii] for ii in iorder]
+ if len(ns_id_codes_sorted) == 0:
+ logger.info("Did not find targets for event, skipping plotting ...")
+ continue
+
figures = {}
# draw station specific data-fits
for istation, ns_id in enumerate(ns_id_codes_sorted):
target_codes = ns_id_to_target_codes[ns_id]
- have_drawn = []
+
for target_code in target_codes:
domain_targets = target_codes_to_targets[target_code]
for k_subf, target in enumerate(domain_targets):
@@ -1097,7 +1089,6 @@ def seismic_fits(problem, stage, plot_options):
def draw_seismic_fits(problem, po):
-
if "seismic" not in list(problem.composites.keys()):
raise TypeError("No seismic composite defined for this problem!")
@@ -1128,27 +1119,13 @@ def draw_seismic_fits(problem, po):
"waveforms_%s_%s_%i" % (stage.number, llk_str, po.nensemble),
)
- if not os.path.exists(outpath) or po.force:
- event_figs = seismic_fits(problem, stage, po)
- else:
- logger.info("waveform plots exist. Use force=True for replotting!")
+ if plot_exists(outpath, po.outformat, po.force):
return
- if po.outformat == "display":
- plt.show()
- else:
- for event_idx, figs in event_figs:
- event_outpath = "{}_{}".format(outpath, event_idx)
- logger.info("saving figures to %s" % event_outpath)
- if po.outformat == "pdf":
- with PdfPages(event_outpath + ".pdf") as opdf:
- for fig in figs:
- opdf.savefig(fig)
- else:
- for i, fig in enumerate(figs):
- fig.savefig(
- event_outpath + "_%i.%s" % (i, po.outformat), dpi=po.dpi
- )
+ event_figs = seismic_fits(problem, stage, po)
+ for event_idx, figs in event_figs:
+ event_outpath = f"{outpath}_{event_idx}"
+ save_figs(figs, event_outpath, po.outformat, po.dpi)
def point2array(point, varnames, idx_source=1, rpoint=None):
@@ -1177,84 +1154,103 @@ def extract_mt_components(problem, po, include_magnitude=False):
"""
Extract Moment Tensor components from problem results for plotting.
"""
- source_type = problem.config.problem_config.source_type
+ source_types = problem.config.problem_config.source_types
n_sources = problem.config.problem_config.n_sources
- if source_type in ["MTSource", "MTQTSource"]:
- varnames = ["mnn", "mee", "mdd", "mne", "mnd", "med"]
- elif source_type in ["DCSource", "RectangularSource"]:
- varnames = ["strike", "dip", "rake"]
- else:
- raise ValueError('Plot is only supported for point "MTSource" and "DCSource"')
-
- if include_magnitude:
- varnames += ["magnitude"]
-
- if not po.reference:
- rpoint = None
- llk_str = po.post_llk
- stage = load_stage(
- problem, stage_number=po.load_stage, load="trace", chains=[-1]
- )
-
- list_m6s = []
- list_best_mts = []
- for idx_source in range(n_sources):
- n_mts = len(stage.mtrace)
- m6s = num.empty((n_mts, len(varnames)), dtype="float64")
- for i, varname in enumerate(varnames):
- try:
- m6s[:, i] = (
- stage.mtrace.get_values(varname, combine=True, squeeze=True)
- .T[idx_source]
- .ravel()
- )
+ composite = problem.composites[problem.config.problem_config.datatypes[0]]
+
+ list_m6s = []
+ list_best_mts = []
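+    # Absolute source index across all source types; used to look up fixed
+    # parameter values for the corresponding source when a variable is not in the trace.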
+ running_source_idx = 0
+ for n_source, source_type in zip(n_sources, source_types):
+ if source_type in ["MTSource", "MTQTSource"]:
+ varnames = ["mnn", "mee", "mdd", "mne", "mnd", "med"]
+ elif source_type in [
+ "DCSource",
+ "RectangularSource",
+ "RectangularBEMSource",
+ "DiskBEMSource",
+ ]:
+ varnames = ["strike", "dip", "rake"]
+ else:
+ logger.warning("Plot is not supported for source_type %s" % source_type)
+ list_m6s.append(None)
+ list_best_mts.append(None)
+ continue
- except ValueError: # if fixed value add that to the ensemble
- rpoint = problem.get_random_point()
- mtfield = num.full_like(
- num.empty((n_mts), dtype=num.float64),
- rpoint[varname][idx_source],
- )
- m6s[:, i] = mtfield
+ if include_magnitude:
+ varnames += ["magnitude"]
- if po.nensemble:
- logger.info("Drawing %i solutions from ensemble ..." % po.nensemble)
- csteps = float(n_mts) / po.nensemble
- idxs = num.floor(num.arange(0, n_mts, csteps)).astype("int32")
- m6s = m6s[idxs, :]
- else:
- logger.info("Drawing full ensemble ...")
+ if not po.reference:
+ rpoint = None
+ llk_str = po.post_llk
+ stage = load_stage(
+ problem, stage_number=po.load_stage, load="trace", chains=[-1]
+ )
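+        # Index of the post_llk sample (e.g. maximum likelihood), used below to
+        # extract the best moment tensor from the ensemble.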
+ best_idx = get_llk_idx_to_trace(stage.mtrace, po.post_llk)
point = get_result_point(stage.mtrace, po.post_llk)
- best_mt = point2array(
- point, varnames=varnames, rpoint=rpoint, idx_source=idx_source
+ source_points = utility.split_point(
+ point,
+ mapping=composite.mapping,
+ weed_params=True,
)
- list_m6s.append(m6s)
- list_best_mts.append(best_mt)
- else:
- llk_str = "ref"
- point = po.reference
- list_best_mts = []
- list_m6s = []
- if source_type == "MTQTSource":
- composite = problem.composites[problem.config.problem_config.datatypes[0]]
- composite.point2sources(po.reference)
- for source in composite.sources:
- list_m6s.append([source.get_derived_parameters()[0:6]])
- list_best_mts.append(None)
+ for idx_source in range(n_source):
+ n_mts = len(stage.mtrace)
+ m6s = num.empty((n_mts, len(varnames)), dtype="float64")
+ for i, varname in enumerate(varnames):
+ try:
+ m6s[:, i] = (
+ stage.mtrace.get_values(varname, combine=True, squeeze=True)
+ .T[idx_source]
+ .ravel()
+ )
+
+ except ValueError: # if fixed value add that to the ensemble
+ rpoint = source_points[running_source_idx]
+ mtfield = num.full_like(
+ num.empty((n_mts), dtype=num.float64),
+ rpoint[varname],
+ )
+ m6s[:, i] = mtfield
+
+ best_mt = m6s[best_idx, :]
+ if po.nensemble:
+ logger.info("Drawing %i solutions from ensemble ..." % po.nensemble)
+ csteps = float(n_mts) / po.nensemble
+ idxs = num.floor(num.arange(0, n_mts, csteps)).astype("int32")
+ m6s = m6s[idxs, :]
+ else:
+ logger.info("Drawing full ensemble ...")
+ list_m6s.append(m6s)
+ list_best_mts.append(best_mt)
+ running_source_idx += 1
else:
- for idx_source in range(n_sources):
- list_m6s.append(
- [
- point2array(
- point=po.reference, varnames=varnames, idx_source=idx_source
+ llk_str = "ref"
+ point = po.reference
+ list_best_mts = []
+ list_m6s = []
+ for n_source, source_type in zip(n_sources, source_types):
+ if source_type == "MTQTSource":
+ composite.point2sources(po.reference)
+ for source in composite.sources:
+ list_m6s.append([source.get_derived_parameters()[0:6]])
+ list_best_mts.append(None)
+
+ else:
+ for idx_source in range(n_source):
+ list_m6s.append(
+ [
+ point2array(
+ point=po.reference,
+ varnames=varnames,
+ idx_source=idx_source,
+ )
+ ]
)
- ]
- )
- list_best_mts.append(None)
+ list_best_mts.append(None)
return list_m6s, list_best_mts, llk_str, point
@@ -1316,7 +1312,6 @@ def draw_ray_piercing_points_bb(
raise ValueError("Number of stations is inconsistent with polarity data!")
for i_s, station in enumerate(stations):
-
ax.text(
y[i_s],
x[i_s],
@@ -1335,7 +1330,6 @@ def draw_ray_piercing_points_bb(
def lower_focalsphere_angles(grid_resolution, projection):
-
nx = grid_resolution
ny = grid_resolution
@@ -1397,7 +1391,6 @@ def mts2amps(
view="top",
wavename="any_P",
):
-
n_balls = len(mts)
nx = ny = grid_resolution
@@ -1406,7 +1399,6 @@ def mts2amps(
)
for mt in mts:
-
mt = beachball.deco_part(mt, mt_type=beachball_type, view=view)
radiation_weights = calculate_radiation_weights(
@@ -1502,16 +1494,16 @@ def plot_fuzzy_beachball_mpl_pixmap(
zorder=zorder,
alpha=alpha,
)
-
elif method == "imshow":
+ extent = (
+ position[0] + y[0] * size,
+ position[0] + y[-1] * size,
+ position[1] - x[0] * size,
+ position[1] - x[-1] * size,
+ )
axes.imshow(
amps.T,
- extent=(
- position[0] + y[0] * size,
- position[0] + y[-1] * size,
- position[1] - x[0] * size,
- position[1] - x[-1] * size,
- ),
+ extent=extent,
cmap=cmap,
transform=transform,
zorder=zorder - 0.1,
@@ -1546,9 +1538,12 @@ def plot_fuzzy_beachball_mpl_pixmap(
phi = num.linspace(0.0, 2 * PI, 361)
x = num.cos(phi)
y = num.sin(phi)
+ pos_y = position[0] + y * size
+ pos_x = position[1] + x * size
+
axes.plot(
- position[0] + y * size,
- position[1] + x * size,
+ pos_y,
+ pos_x,
linewidth=linewidth,
color=edgecolor,
transform=transform,
@@ -1558,7 +1553,6 @@ def plot_fuzzy_beachball_mpl_pixmap(
def draw_fuzzy_beachball(problem, po):
-
if po.load_stage is None:
po.load_stage = -1
@@ -1588,101 +1582,87 @@ def draw_fuzzy_beachball(problem, po):
wavenames = ["any_P"]
for k_pamp, wavename in enumerate(wavenames):
-
for idx_source, (m6s, best_mt) in enumerate(zip(list_m6s, list_best_mt)):
outpath = os.path.join(
problem.outfolder,
po.figure_dir,
- "fuzzy_beachball_%i_%s_%i_%s_%i.%s"
- % (
- po.load_stage,
- llk_str,
- po.nensemble,
- wavename,
- idx_source,
- po.outformat,
- ),
+ "fuzzy_beachball_%i_%s_%i_%s_%i"
+ % (po.load_stage, llk_str, po.nensemble, wavename, idx_source),
)
- if not os.path.exists(outpath) or po.force or po.outformat == "display":
- fig = plt.figure(figsize=(4.0, 4.0))
- fig.subplots_adjust(left=0.0, right=1.0, bottom=0.0, top=1.0)
- axes = fig.add_subplot(1, 1, 1)
+ if plot_exists(outpath, po.outformat, po.force):
+                continue
- transform, position, size = beachball.choose_transform(
- axes, kwargs["size_units"], kwargs["position"], kwargs["size"]
- )
+ fig = plt.figure(figsize=(4.0, 4.0))
+ fig.subplots_adjust(left=0.0, right=1.0, bottom=0.0, top=1.0)
+ axes = fig.add_subplot(1, 1, 1)
- plot_fuzzy_beachball_mpl_pixmap(
- m6s,
- axes,
- best_mt=best_mt,
- best_color="white",
+ transform, position, size = beachball.choose_transform(
+ axes, kwargs["size_units"], kwargs["position"], kwargs["size"]
+ )
+
+ plot_fuzzy_beachball_mpl_pixmap(
+ m6s,
+ axes,
+ best_mt=best_mt,
+ best_color="white",
+ wavename=wavename,
+ **kwargs,
+ )
+
+ if best_mt is not None:
+ best_amps, bx, by = mts2amps(
+ [best_mt],
+ grid_resolution=kwargs["grid_resolution"],
+ projection=kwargs["projection"],
+ beachball_type=kwargs["beachball_type"],
wavename=wavename,
- **kwargs
+ mask=False,
)
- if best_mt is not None:
- best_amps, bx, by = mts2amps(
- [best_mt],
- grid_resolution=kwargs["grid_resolution"],
- projection=kwargs["projection"],
- beachball_type=kwargs["beachball_type"],
- wavename=wavename,
- mask=False,
- )
+ axes.contour(
+ position[0] + by * size,
+ position[1] + bx * size,
+ best_amps.T,
+ levels=[0.0],
+ colors=["black"],
+ linestyles="dashed",
+ linewidths=kwargs["linewidth"],
+ transform=transform,
+ zorder=kwargs["zorder"],
+ alpha=kwargs["alpha"],
+ )
- axes.contour(
- position[0] + by * size,
- position[1] + bx * size,
- best_amps.T,
- levels=[0.0],
- colors=["black"],
- linestyles="dashed",
- linewidths=kwargs["linewidth"],
- transform=transform,
- zorder=kwargs["zorder"],
- alpha=kwargs["alpha"],
- )
+ if "polarity" in problem.config.problem_config.datatypes:
+ pmap = composite.wavemaps[k_pamp]
+ source = composite.sources[pmap.config.event_idx]
+ pmap.update_targets(
+ composite.engine,
+ source,
+ always_raytrace=composite.config.gf_config.always_raytrace,
+ )
+ draw_ray_piercing_points_bb(
+ axes,
+ pmap.get_takeoff_angles_rad(),
+ pmap.get_azimuths_rad(),
+ pmap._prepared_data,
+ stations=pmap.stations,
+ size=size,
+ position=position,
+ transform=transform,
+ )
- if "polarity" in problem.config.problem_config.datatypes:
- pmap = composite.wavemaps[k_pamp]
- source = composite.sources[pmap.config.event_idx]
- pmap.update_targets(
- composite.engine,
- source,
- always_raytrace=composite.config.gf_config.always_raytrace,
- )
- draw_ray_piercing_points_bb(
- axes,
- pmap.get_takeoff_angles_rad(),
- pmap.get_azimuths_rad(),
- pmap._prepared_data,
- stations=pmap.stations,
- size=size,
- position=position,
- transform=transform,
- )
+ axes.set_xlim(0.0, 10.0)
+ axes.set_ylim(0.0, 10.0)
+ axes.set_axis_off()
- axes.set_xlim(0.0, 10.0)
- axes.set_ylim(0.0, 10.0)
- axes.set_axis_off()
-
- if not po.outformat == "display":
- logger.info("saving figure to %s" % outpath)
- fig.savefig(outpath, dpi=po.dpi)
- else:
- plt.show()
-
- else:
- logger.info("Plot already exists! Please use --force to overwrite!")
+ save_figs([fig], outpath, po.outformat, po.dpi)
def fuzzy_mt_decomposition(axes, list_m6s, labels=None, colors=None, fontsize=12):
"""
Plot fuzzy moment tensor decompositions for list of mt ensembles.
"""
- from pymc3 import quantiles
from pyrocko.moment_tensor import MomentTensor
logger.info("Drawing Fuzzy MT Decomposition ...")
@@ -1694,11 +1674,10 @@ def fuzzy_mt_decomposition(axes, list_m6s, labels=None, colors=None, fontsize=12
"size_units": "data",
"edgecolor": "black",
"linewidth": 1,
- "grid_resolution": 200,
+ "grid_resolution": 400,
}
def get_decomps(source_vals):
-
isos = []
dcs = []
clvds = []
@@ -1731,7 +1710,9 @@ def get_decomps(source_vals):
lines.append((label, m6s, color))
- magnitude_full_max = max(m6s.mean(axis=0)[-1] for (_, m6s, _) in lines)
+ magnitude_full_max = max(
+ m6s.mean(axis=0)[-1] for (_, m6s, _) in lines if m6s is not None
+ )
for xpos, label in [
(0.0, "Full"),
@@ -1740,7 +1721,6 @@ def get_decomps(source_vals):
(6.0, "CLVD"),
(8.0, "DC"),
]:
-
axes.annotate(
label,
xy=(1 + xpos, nlines_max),
@@ -1754,6 +1734,9 @@ def get_decomps(source_vals):
)
for i, (label, m6s, color_t) in enumerate(lines):
+ if m6s is None:
+ continue
+
ypos = nlines_max - (i * yscale) - 1.0
mean_magnitude = m6s.mean(0)[-1]
size0 = mean_magnitude / magnitude_full_max
@@ -1778,30 +1761,27 @@ def get_decomps(source_vals):
(6.0, clvds, "+"),
(8.0, dcs, None),
]:
-
ratios = num.array([comp[1] for comp in decomp])
ratio = ratios.mean()
ratios_diff = ratios.max() - ratios.min()
- ratios_qu = quantiles(ratios * 100.0)
+ ratios_qu = num.percentile(ratios * 100.0, [2.5, 97.5])
mt_parts = [comp[2] for comp in decomp]
if ratio > 1e-4:
try:
- size = num.sqrt(ratio) * 0.95 * size0
+ size = num.sqrt(ratio) * 0.98 * size0
kwargs["position"] = (1.0 + xpos, ypos)
kwargs["size"] = size
kwargs["color_t"] = color_t
- beachball.plot_fuzzy_beachball_mpl_pixmap(
+ plot_fuzzy_beachball_mpl_pixmap(
mt_parts, axes, best_mt=None, **kwargs
)
if ratios_diff > 0.0:
- label = "{:03.1f}-{:03.1f}%".format(
- ratios_qu[2.5], ratios_qu[97.5]
- )
+ label = "{:03.1f}-{:03.1f}%".format(*ratios_qu)
else:
- label = "{:03.1f}%".format(ratios_qu[2.5])
+ label = "{:03.1f}%".format(ratios_qu[0])
axes.annotate(
label,
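The hunk above replaces the removed `pymc3.quantiles` helper with `numpy.percentile`. A minimal sketch of the substitution, with an illustrative `ratios` array (the 2.5/97.5 bounds match the call above):

```python
import numpy as num

ratios = num.array([0.12, 0.18, 0.22, 0.15])  # illustrative component ratios

# pymc3's quantiles(ratios * 100.0) returned a dict keyed by percentile;
# numpy.percentile returns the two values directly in the requested order
ratios_qu = num.percentile(ratios * 100.0, [2.5, 97.5])

label = "{:03.1f}-{:03.1f}%".format(*ratios_qu)  # "12.2-21.7%"
```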
@@ -1867,10 +1847,9 @@ def get_decomps(source_vals):
def draw_fuzzy_mt_decomposition(problem, po):
-
fontsize = 10
- n_sources = problem.config.problem_config.n_sources
+ n_sources = sum(problem.config.problem_config.n_sources)
if po.load_stage is None:
po.load_stage = -1
@@ -1880,27 +1859,20 @@ def draw_fuzzy_mt_decomposition(problem, po):
outpath = os.path.join(
problem.outfolder,
po.figure_dir,
- "fuzzy_mt_decomposition_%i_%s_%i.%s"
- % (po.load_stage, llk_str, po.nensemble, po.outformat),
+ "fuzzy_mt_decomposition_%i_%s_%i" % (po.load_stage, llk_str, po.nensemble),
)
- if not os.path.exists(outpath) or po.force or po.outformat == "display":
-
- height = 1.5 + (n_sources - 1) * 0.65
- fig = plt.figure(figsize=(6.0, height))
- fig.subplots_adjust(left=0.01, right=0.99, bottom=0.03, top=0.97)
- axes = fig.add_subplot(1, 1, 1)
+ if plot_exists(outpath, po.outformat, po.force):
+ return
- fuzzy_mt_decomposition(axes, list_m6s=list_m6s, fontsize=fontsize)
+ height = 1.5 + (n_sources - 1) * 0.65
+ fig = plt.figure(figsize=(6.0, height))
+ fig.subplots_adjust(left=0.01, right=0.99, bottom=0.03, top=0.97)
+ axes = fig.add_subplot(1, 1, 1)
- if not po.outformat == "display":
- logger.info("saving figure to %s" % outpath)
- fig.savefig(outpath, dpi=po.dpi)
- else:
- plt.show()
+ fuzzy_mt_decomposition(axes, list_m6s=list_m6s, fontsize=fontsize)
- else:
- logger.info("Plot already exists! Please use --force to overwrite!")
+ save_figs([fig], outpath, po.outformat, po.dpi)
def station_variance_reductions(problem, stage, plot_options):
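The repeated exists/force/display branching in these plotting functions is collapsed into the `plot_exists`/`save_figs` helpers. The sketch below reconstructs what such helpers could look like purely from the call sites in this file; the actual implementations live elsewhere in `beat.plotting` and may differ in detail:

```python
import logging
import os

import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages

logger = logging.getLogger("plotting")


def plot_exists(outpath, outformat, force):
    # only skip plotting if the file is there, no --force was given and the
    # figure is not meant to be shown interactively
    if outformat == "display" or force:
        return False
    if os.path.exists("%s.%s" % (outpath, outformat)):
        logger.info("Plot already exists! Please use --force to overwrite!")
        return True
    return False


def save_figs(figs, outpath, outformat, dpi):
    # either show, bundle into one PDF, or save each figure separately
    if outformat == "display":
        plt.show()
    elif outformat == "pdf":
        with PdfPages(outpath + ".pdf") as opdf:
            for fig in figs:
                opdf.savefig(fig)
    else:
        for idx, fig in enumerate(figs):
            logger.info("saving figure to %s" % outpath)
            fig.savefig("%s_%i.%s" % (outpath, idx, outformat), dpi=dpi)
```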
@@ -1917,7 +1889,7 @@ def target_network_station(target):
composite = problem.composites["seismic"]
fontsize = 8
- fontsize_title = 10
+ # fontsize_title = 10
labelpad = 1 # distance between ticks and label
target_index = dict((target, i) for (i, target) in enumerate(composite.targets))
@@ -1943,13 +1915,13 @@ def target_network_station(target):
bresults = composite.assemble_results(
best_point, outmode="tapered_data", chop_bounds=chop_bounds
)
- synth_plot_flag = True
+ # synth_plot_flag = True
else:
# get dummy results for data
logger.warning('Got "None" post_llk, still loading MAP for VR calculation')
best_point = get_result_point(stage.mtrace, "max")
bresults = composite.assemble_results(best_point, chop_bounds=chop_bounds)
- synth_plot_flag = False
+ # synth_plot_flag = False
tpoint = get_weights_point(composite, best_point, problem.config)
@@ -1990,6 +1962,13 @@ def target_network_station(target):
cg_to_target_codes = utility.gather(unique_target_codes, lambda t: t[3])
+ if len(ns_id_to_target_codes) == 0:
+ logger.info(
+ "Did not find targets for event %s, skipping plotting ..."
+ % event.__str__()
+ )
+ continue
+
# get channel group specific mean variance reductions
cg_var_reductions = {}
for cg, target_codes in cg_to_target_codes.items():
@@ -2022,7 +2001,7 @@ def target_network_station(target):
dist = source.distance_to(target)
data.append(dist)
- dists = num.array(data, dtype=num.float)
+ dists = num.array(data, dtype=num.float64)
iorder = num.argsort(dists)
sorted_dists = dists[iorder] / km
@@ -2160,7 +2139,6 @@ def target_network_station(target):
def draw_station_variance_reductions(problem, po):
-
if "seismic" not in list(problem.composites.keys()):
raise TypeError("No seismic composite defined for this problem!")
@@ -2191,29 +2169,13 @@ def draw_station_variance_reductions(problem, po):
"station_variance_reductions_%s_%s_%i" % (stage.number, llk_str, po.nensemble),
)
- if not os.path.exists(outpath) or po.force:
- event_figs = station_variance_reductions(problem, stage, po)
- else:
- logger.info(
- "station variance reductions plot exists. Use force=True for replotting!"
- )
+ if plot_exists(outpath, po.outformat, po.force):
return
- if po.outformat == "display":
- plt.show()
- else:
- for event_idx, figs in event_figs:
- event_outpath = "{}_{}".format(outpath, event_idx)
- logger.info("saving figures to %s" % event_outpath)
- if po.outformat == "pdf":
- with PdfPages(event_outpath + ".pdf") as opdf:
- for fig in figs:
- opdf.savefig(fig)
- else:
- for i, fig in enumerate(figs):
- fig.savefig(
- event_outpath + "_%i.%s" % (i, po.outformat), dpi=po.dpi
- )
+ event_figs = station_variance_reductions(problem, stage, po)
+ for event_idx, figs in event_figs:
+ event_outpath = f"{outpath}_{event_idx}"
+ save_figs(figs, event_outpath, po.outformat, po.dpi)
def draw_hudson(problem, po):
@@ -2244,6 +2206,16 @@ def draw_hudson(problem, po):
beachballsize_small = beachballsize * 0.5
for idx_source, (m6s, best_mt) in enumerate(zip(list_m6s, list_best_mts)):
+ outpath = os.path.join(
+ problem.outfolder,
+ po.figure_dir,
+ "hudson_%i_%s_%i_%i.%s"
+ % (po.load_stage, llk_str, po.nensemble, idx_source, po.outformat),
+ )
+
+ if plot_exists(outpath, po.outformat, po.force):
+ return
+
fig = plt.figure(figsize=(4.0, 4.0))
fig.subplots_adjust(left=0.0, right=1.0, bottom=0.0, top=1.0)
axes = fig.add_subplot(1, 1, 1)
@@ -2332,23 +2304,8 @@ def draw_hudson(problem, po):
"skipping drawing ..."
)
- outpath = os.path.join(
- problem.outfolder,
- po.figure_dir,
- "hudson_%i_%s_%i_%i.%s"
- % (po.load_stage, llk_str, po.nensemble, idx_source, po.outformat),
- )
-
- if not os.path.exists(outpath) or po.force or po.outformat == "display":
-
- if not po.outformat == "display":
- logger.info("saving figure to %s" % outpath)
- fig.savefig(outpath, dpi=po.dpi)
- else:
- plt.show()
-
- else:
- logger.info("Plot already exists! Please use --force to overwrite!")
+ logger.info("saving figure to %s" % outpath)
+ fig.savefig(outpath, dpi=po.dpi)
def draw_data_stations(
@@ -2397,7 +2354,6 @@ def draw_data_stations(
def draw_events(gmt, events, *args, **kwargs):
-
ev_lons = [ev.lon for ev in events]
ev_lats = [ev.lat for ev in events]
@@ -2467,7 +2423,7 @@ def gmt_station_map_azimuthal(
max_distance,
data_cpt,
scale_label,
- *("-J%s" % J_location, "-R%s" % R_location, "-St14p")
+ *("-J%s" % J_location, "-R%s" % R_location, "-St14p"),
)
else:
st_lons = [station.lon for station in stations]
@@ -2501,7 +2457,7 @@ def gmt_station_map_azimuthal(
gmt,
[event],
*("-J%s" % J_location, "-R%s" % R_location),
- **dict(G="orange", S="a14p")
+ **dict(G="orange", S="a14p"),
)
@@ -2560,95 +2516,93 @@ def draw_station_map_gmt(problem, po):
% (wmap.name, wmap.mapnumber, value_string, po.outformat),
)
+ if plot_exists(outpath, po.outformat, po.force):
+ continue
+
dist = max(wmap.get_distances_deg())
- if not os.path.exists(outpath) or po.force:
- if point:
- time_shifts = extract_time_shifts(point, sc.hierarchicals, wmap)
- else:
- time_shifts = None
+ if point:
+ time_shifts = extract_time_shifts(point, sc.hierarchicals, wmap)
+ else:
+ time_shifts = None
- if dist > 30:
- logger.info(
- "Using equidistant azimuthal projection for"
- " teleseismic setup of wavemap %s." % wmap._mapid
- )
+ if dist > 30:
+ logger.info(
+ "Using equidistant azimuthal projection for"
+ " teleseismic setup of wavemap %s." % wmap._mapid
+ )
- gmt = gmtpy.GMT(config=gmtconfig)
- gmt_station_map_azimuthal(
- gmt,
- wmap.stations,
- event,
- data=time_shifts,
- max_distance=dist,
- width=w,
- bin_width=bin_width,
- fontsize=fontsize,
- font=font,
- )
+ gmt = gmtpy.GMT(config=gmtconfig)
+ gmt_station_map_azimuthal(
+ gmt,
+ wmap.stations,
+ event,
+ data=time_shifts,
+ max_distance=dist,
+ width=w,
+ bin_width=bin_width,
+ fontsize=fontsize,
+ font=font,
+ )
- gmt.save(outpath, resolution=po.dpi, size=w)
+ gmt.save(outpath, resolution=po.dpi, size=w)
+ else:
+ logger.info(
+ "Using equidistant projection for regional setup "
+ "of wavemap %s." % wmap._mapid
+ )
+ from pyrocko import orthodrome as otd
+ from pyrocko.automap import Map
+
+ m = Map(
+ lat=event.lat,
+ lon=event.lon,
+ radius=dist * otd.d2m,
+ width=h,
+ height=h,
+ show_grid=True,
+ show_topo=True,
+ show_scale=True,
+ color_dry=(143, 188, 143), # dark sea green
+ illuminate=True,
+ illuminate_factor_ocean=0.15,
+ # illuminate_factor_land = 0.2,
+ show_rivers=True,
+ show_plates=False,
+ gmt_config=gmtconfig,
+ )
- else:
- logger.info(
- "Using equidistant projection for regional setup "
- "of wavemap %s." % wmap._mapid
- )
- from pyrocko import orthodrome as otd
- from pyrocko.automap import Map
-
- m = Map(
- lat=event.lat,
- lon=event.lon,
- radius=dist * otd.d2m,
- width=h,
- height=h,
- show_grid=True,
- show_topo=True,
- show_scale=True,
- color_dry=(143, 188, 143), # grey
- illuminate=True,
- illuminate_factor_ocean=0.15,
- # illuminate_factor_land = 0.2,
- show_rivers=True,
- show_plates=False,
- gmt_config=gmtconfig,
+ if time_shifts:
+ sargs = m.jxyr + ["-St14p"]
+ draw_data_stations(
+ m.gmt,
+ wmap.stations,
+ time_shifts,
+ dist,
+ data_cpt=None,
+ scale_label="time shifts [s]",
+ *sargs,
)
- if time_shifts:
- sargs = m.jxyr + ["-St14p"]
- draw_data_stations(
- m.gmt,
- wmap.stations,
- time_shifts,
- dist,
- data_cpt=None,
- scale_label="time shifts [s]",
- *sargs
- )
-
- for st in wmap.stations:
- text = "{}.{}".format(st.network, st.station)
- m.add_label(lat=st.lat, lon=st.lon, text=text)
- else:
- m.add_stations(
- wmap.stations, psxy_style=dict(S="t14p", G="red")
- )
+ for st in wmap.stations:
+ text = "{}.{}".format(st.network, st.station)
+ m.add_label(lat=st.lat, lon=st.lon, text=text)
+ else:
+ m.add_stations(wmap.stations, psxy_style=dict(S="t14p", G="red"))
- draw_events(m.gmt, [event], *m.jxyr, **dict(G="yellow", S="a14p"))
- m.save(outpath, resolution=po.dpi, oversample=2.0)
+ draw_events(m.gmt, [event], *m.jxyr, **dict(G="yellow", S="a14p"))
+ m.save(outpath, resolution=po.dpi, oversample=2.0)
- logger.info("saving figure to %s" % outpath)
- else:
- logger.info("Plot exists! Use --force to overwrite!")
+ logger.info("saving figure to %s" % outpath)
def draw_lune_plot(problem, po):
-
if po.outformat == "svg":
raise NotImplementedError("SVG format is not supported for this plot!")
- if problem.config.problem_config.source_type != "MTQTSource":
+ try:
+ idx = problem.config.problem_config.source_types.index("MTQTSource")
+ except ValueError:
raise TypeError("Lune plot is only supported for the MTQTSource!")
if po.load_stage is None:
@@ -2657,9 +2611,9 @@ def draw_lune_plot(problem, po):
stage = load_stage(problem, stage_number=po.load_stage, load="trace", chains=[-1])
n_mts = len(stage.mtrace)
- n_sources = problem.config.problem_config.n_sources
+ n_source = problem.config.problem_config.n_sources[idx]
- for idx_source in range(n_sources):
+ for idx_source in range(n_source):
result_ensemble = {}
for varname in ["v", "w"]:
try:
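Since 2.0.0 `source_type`/`n_sources` are lists (`source_types`, `n_sources`), so this plot looks the MTQT entry up by index while other plots sum the list. A small sketch of the new access pattern, using a hypothetical stand-in for the config object:

```python
from types import SimpleNamespace

# hypothetical stand-in for problem.config.problem_config after the 2.0.0 change
pc = SimpleNamespace(
    source_types=["MTQTSource", "RectangularSource"],
    n_sources=[1, 2],
)

try:
    idx = pc.source_types.index("MTQTSource")
except ValueError:
    raise TypeError("Lune plot is only supported for the MTQTSource!")

n_source = pc.n_sources[idx]         # number of sources of that type -> 1
n_sources_total = sum(pc.n_sources)  # all sources, e.g. for figure sizing -> 3
print(idx, n_source, n_sources_total)  # 0 1 3
```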
@@ -2716,7 +2670,6 @@ def draw_lune_plot(problem, po):
def lune_plot(v_tape=None, w_tape=None, reference_v_tape=None, reference_w_tape=None):
-
from beat.sources import v_to_gamma, w_to_delta
if len(gmtpy.detect_gmt_installations()) < 1:
@@ -2726,14 +2679,12 @@ def lune_plot(v_tape=None, w_tape=None, reference_v_tape=None, reference_w_tape=
font = "1"
def draw_lune_arcs(gmt, R, J):
-
lons = [30.0, -30.0, 30.0, -30.0]
lats = [54.7356, 35.2644, -35.2644, -54.7356]
gmt.psxy(in_columns=(lons, lats), N=True, W="1p,black", R=R, J=J)
def draw_lune_points(gmt, R, J, labels=True):
-
lons = [0.0, -30.0, -30.0, -30.0, 0.0, 30.0, 30.0, 30.0, 0.0]
lats = [-90.0, -54.7356, 0.0, 35.2644, 90.0, 54.7356, 0.0, -35.2644, 0.0]
annotations = ["-ISO", "", "+CLVD", "+LVD", "+ISO", "", "-CLVD", "-LVD", "DC"]
@@ -2745,7 +2696,6 @@ def draw_lune_points(gmt, R, J, labels=True):
if labels:
farg = ["-F+f+j"]
for lon, lat, text, align in zip(lons, lats, annotations, alignments):
-
rows.append(
(lon, lat, "%i,%s,%s" % (fontsize, font, "black"), align, text)
)
@@ -2805,7 +2755,6 @@ def check_fixed(a, varname):
# -Ctmp_$out.cpt -I -N -A- -O -K >> $ps
def draw_reference_lune(gmt, R, J, reference_v_tape, reference_w_tape):
-
gamma = num.rad2deg(v_to_gamma(reference_v_tape)) # lune longitude [rad]
delta = num.rad2deg(w_to_delta(reference_w_tape)) # lune latitude [rad]
diff --git a/beat/theanof.py b/beat/pytensorf.py
similarity index 61%
rename from beat/theanof.py
rename to beat/pytensorf.py
index 77d5f572..6a6182a8 100644
--- a/beat/theanof.py
+++ b/beat/pytensorf.py
@@ -1,31 +1,30 @@
"""
-Package for wrapping various functions into Theano-Ops to be able to include
-them into theano graphs as is needed by the pymc3 models.
+Package for wrapping various functions into pytensor Ops so that they can be
+included in pytensor graphs, as required by the pymc models.
Far future:
include a 'def grad:' -method to each Op in order to enable the use of
gradient based optimization algorithms
"""
-import copy
import logging
from collections import OrderedDict
import numpy as num
-import theano
-import theano.tensor as tt
-from pymc3.model import FreeRV
+import pytensor.tensor as tt
+from pyrocko.gf import LocalEngine
from pyrocko.trace import nextpow2
+from pytensor.graph import Apply
-from beat import heart, interseismic, utility
+from beat import heart, utility
from beat.fast_sweeping import fast_sweep
km = 1000.0
-logger = logging.getLogger("theanof")
+logger = logging.getLogger("pytensorf")
-class GeoSynthesizer(theano.Op):
+class GeoSynthesizer(tt.Op):
"""
- Theano wrapper for a geodetic forward model with synthetic displacements.
+ pytensor wrapper for a geodetic forward model with synthetic displacements.
Uses pyrocko engine and fomosto GF stores.
Input order does not matter anymore! Did in previous version.
@@ -36,18 +35,27 @@ class GeoSynthesizer(theano.Op):
containing :class:`pyrocko.gf.seismosizer.Source` Objects
targets : List
containing :class:`pyrocko.gf.targets.StaticTarget` Objects
+ mapping : Dict
+ variable names and lists of integers describing how they map to the source objects
"""
- __props__ = ("engine", "sources", "targets")
+ __props__ = ("engine", "sources", "targets", "mapping")
+
+ def __init__(self, engine, sources, targets, mapping):
+ if isinstance(engine, LocalEngine):
+ self.outmode = "stacked_array"
+ else:
+ self.outmode = "array"
- def __init__(self, engine, sources, targets):
self.engine = engine
self.sources = tuple(sources)
self.targets = tuple(targets)
self.nobs = sum([target.lats.size for target in self.targets])
+ self.mapping = mapping
def __getstate__(self):
- self.engine.close_cashed_stores()
+ if isinstance(self.engine, LocalEngine):
+ self.engine.close_cashed_stores()
return self.__dict__
def __setstate__(self, state):
@@ -55,7 +63,7 @@ def __setstate__(self, state):
def make_node(self, inputs):
"""
- Transforms theano tensors to node and allocates variables accordingly.
+ Transforms pytensor tensors to node and allocates variables accordingly.
Parameters
----------
@@ -63,7 +71,7 @@ def make_node(self, inputs):
keys being strings of source attributes of the
:class:`pscmp.RectangularSource` that was used to initialise
the Operator
- values are :class:`theano.tensor.Tensor`
+ values are :class:`pytensor.tensor.Tensor`
"""
inlist = []
@@ -72,9 +80,11 @@ def make_node(self, inputs):
for i in inputs.values():
inlist.append(tt.as_tensor_variable(i))
- outm = tt.as_tensor_variable(num.zeros((2, 2)))
+ outm_shape = self.infer_shape()[0]
+
+ outm = tt.as_tensor_variable(num.zeros(outm_shape))
outlist = [outm.type()]
- return theano.Apply(self, inlist, outlist)
+ return Apply(self, inlist, outlist)
def perform(self, node, inputs, output):
"""
@@ -85,10 +95,8 @@ def perform(self, node, inputs, output):
inputs : list
of :class:`numpy.ndarray`
output : list
- 1) of synthetic waveforms of :class:`numpy.ndarray`
- (n x nsamples)
- 2) of start times of the first waveform samples
- :class:`numpy.ndarray` (n x 1)
+ 1) of synthetic waveforms of :class:`numpy.ndarray` (n x nsamples)
+ 2) of start times of the first waveform samples :class:`numpy.ndarray` (n x 1)
"""
synths = output[0]
@@ -96,7 +104,11 @@ def perform(self, node, inputs, output):
mpoint = utility.adjust_point_units(point)
- source_points = utility.split_point(mpoint)
+ source_points = utility.split_point(
+ mpoint,
+ mapping=self.mapping,
+ weed_params=True,
+ )
for i, source in enumerate(self.sources):
utility.update_source(source, **source_points[i])
@@ -107,195 +119,16 @@ def perform(self, node, inputs, output):
engine=self.engine,
targets=self.targets,
sources=self.sources,
- outmode="stacked_array",
+ outmode=self.outmode,
)
- def infer_shape(self, node, input_shapes):
+ def infer_shape(self, fgraph=None, node=None, input_shapes=None):
return [(self.nobs, 3)]
-class GeoLayerSynthesizerPsCmp(theano.Op):
- """
- Theano wrapper for a geodetic forward model for static observation
- points. Direct call to PsCmp, needs PsGrn Greens Function store!
- Deprecated, currently not used in composites.
-
- Parameters
- ----------
- lats : n x 1 :class:`numpy.ndarray`
- with latitudes of observation points
- lons : n x 1 :class:`numpy.ndarray`
- with longitudes of observation points
- store_superdir : str
- with absolute path to the GF store super directory
- crust_ind : int
- with the index to the GF store
- sources : :class:`pscmp.RectangularSource`
- to be used in generating the synthetic displacements
- """
-
- __props__ = ("lats", "lons", "store_superdir", "crust_ind", "sources")
-
- def __init__(self, lats, lons, store_superdir, crust_ind, sources):
- self.lats = tuple(lats)
- self.lons = tuple(lons)
- self.store_superdir = store_superdir
- self.crust_ind = crust_ind
- self.sources = tuple(sources)
-
- def __getstate__(self):
- return self.__dict__
-
- def __setstate__(self, state):
- self.__dict__.update(state)
-
- def make_node(self, inputs):
- """
- Transforms theano tensors to node and allocates variables accordingly.
-
- Parameters
- ----------
- inputs : dict
- keys being strings of source attributes of the
- :class:`pscmp.RectangularSource` that was used to initialise
- the Operator
- values are :class:`theano.tensor.Tensor`
- """
- inlist = []
- self.varnames = list(inputs.keys())
-
- for i in inputs.values():
- inlist.append(tt.as_tensor_variable(i))
-
- out = tt.as_tensor_variable(num.zeros((2, 2)))
- outlist = [out.type()]
- return theano.Apply(self, inlist, outlist)
-
- def perform(self, node, inputs, output):
- """
- Perform method of the Operator to calculate synthetic displacements.
-
- Parameters
- ----------
- inputs : list
- of :class:`numpy.ndarray`
- output : list
- of synthetic displacements of :class:`numpy.ndarray` (n x 1)
- """
- z = output[0]
-
- point = {vname: i for vname, i in zip(self.varnames, inputs)}
-
- point = utility.adjust_point_units(point)
-
- source_points = utility.split_point(point)
-
- for i, source in enumerate(self.sources):
- source.update(**source_points[i])
-
- z[0] = heart.geo_layer_synthetics_pscmp(
- store_superdir=self.store_superdir,
- crust_ind=self.crust_ind,
- lons=self.lons,
- lats=self.lats,
- sources=self.sources,
- )
-
- def infer_shape(self, node, input_shapes):
- return [(len(self.lats), 3)]
-
-
-class GeoInterseismicSynthesizer(theano.Op):
+class SeisSynthesizer(tt.Op):
"""
- Theano wrapper to transform the parameters of block model to
- parameters of a fault.
- """
-
- __props__ = ("lats", "lons", "engine", "targets", "sources", "reference")
-
- def __init__(self, lats, lons, engine, targets, sources, reference):
- self.lats = tuple(lats)
- self.lons = tuple(lons)
- self.engine = engine
- self.targets = tuple(targets)
- self.sources = tuple(sources)
- self.reference = reference
-
- def __getstate__(self):
- return self.__dict__
-
- def __setstate__(self, state):
- self.__dict__.update(state)
-
- def make_node(self, inputs):
- """
- Transforms theano tensors to node and allocates variables accordingly.
-
- Parameters
- ----------
- inputs : dict
- keys being strings of source attributes of the
- :class:`pyrocko.gf.seismosizer.RectangularSource` that was used
- to initialise the Operator.
- values are :class:`theano.tensor.Tensor`
- """
- inlist = []
-
- self.fixed_values = {}
- self.varnames = []
-
- for k, v in inputs.items():
- if isinstance(v, FreeRV):
- self.varnames.append(k)
- inlist.append(tt.as_tensor_variable(v))
- else:
- self.fixed_values[k] = v
-
- out = tt.as_tensor_variable(num.zeros((2, 2)))
- outlist = [out.type()]
- return theano.Apply(self, inlist, outlist)
-
- def perform(self, node, inputs, output):
- """
- Perform method of the Operator to calculate synthetic displacements.
-
- Parameters
- ----------
- inputs : list
- of :class:`numpy.ndarray`
- output : list
- of synthetic displacements of :class:`numpy.ndarray` (n x 3)
- """
- z = output[0]
-
- point = {vname: i for vname, i in zip(self.varnames, inputs)}
- point.update(self.fixed_values)
-
- point = utility.adjust_point_units(point)
- spoint, bpoint = interseismic.seperate_point(point)
-
- source_points = utility.split_point(spoint)
-
- for i, source_point in enumerate(source_points):
- self.sources[i].update(**source_point)
-
- z[0] = interseismic.geo_backslip_synthetics(
- engine=self.engine,
- targets=self.targets,
- sources=self.sources,
- lons=num.array(self.lons),
- lats=num.array(self.lats),
- reference=self.reference,
- **bpoint
- )
-
- def infer_shape(self, node, input_shapes):
- return [(len(self.lats), 3)]
-
-
-class SeisSynthesizer(theano.Op):
- """
- Theano wrapper for a seismic forward model with synthetic waveforms.
+ pytensor wrapper for a seismic forward model with synthetic waveforms.
Input order does not matter anymore! Did in previous version.
Parameters
@@ -315,6 +148,7 @@ class SeisSynthesizer(theano.Op):
__props__ = (
"engine",
"sources",
+ "mapping",
"targets",
"event",
"arrival_taper",
@@ -330,6 +164,7 @@ def __init__(
self,
engine,
sources,
+ mapping,
targets,
event,
arrival_taper,
@@ -354,6 +189,7 @@ def __init__(
self.sample_rate = self.engine.get_store(
self.targets[0].store_id
).config.sample_rate
+ self.mapping = mapping
if self.domain == "spectrum":
nsamples = nextpow2(self.arrival_taper.nsamples(self.sample_rate))
@@ -373,7 +209,7 @@ def __setstate__(self, state):
def make_node(self, inputs):
"""
- Transforms theano tensors to node and allocates variables accordingly.
+ Transforms pytensor tensors to node and allocates variables accordingly.
Parameters
----------
@@ -381,7 +217,7 @@ def make_node(self, inputs):
keys being strings of source attributes of the
:class:`pscmp.RectangularSource` that was used to initialise
the Operator
- values are :class:`theano.tensor.Tensor`
+ values are :class:`pytensor.tensor.Tensor`
"""
inlist = []
@@ -390,10 +226,12 @@ def make_node(self, inputs):
for i in inputs.values():
inlist.append(tt.as_tensor_variable(i))
- outm = tt.as_tensor_variable(num.zeros((2, 2)))
- outv = tt.as_tensor_variable(num.zeros((2)))
+ outm_shape, outv_shape = self.infer_shape()
+
+ outm = tt.as_tensor_variable(num.zeros(outm_shape))
+ outv = tt.as_tensor_variable(num.zeros(outv_shape))
outlist = [outm.type(), outv.type()]
- return theano.Apply(self, inlist, outlist)
+ return Apply(self, inlist, outlist)
def perform(self, node, inputs, output):
"""
@@ -404,10 +242,8 @@ def perform(self, node, inputs, output):
inputs : list
of :class:`numpy.ndarray`
output : list
- 1) of synthetic waveforms of :class:`numpy.ndarray`
- (n x nsamples)
- 2) of start times of the first waveform samples
- :class:`numpy.ndarray` (n x 1)
+ 1) of synthetic waveforms of :class:`numpy.ndarray` (n x nsamples)
+ 2) of start times of the first waveform samples :class:`numpy.ndarray` (n x 1)
"""
synths = output[0]
tmins = output[1]
@@ -422,7 +258,11 @@ def perform(self, node, inputs, output):
else:
arrival_times = num.array(self.arrival_times)
- source_points = utility.split_point(mpoint)
+ source_points = utility.split_point(
+ mpoint,
+ mapping=self.mapping,
+ weed_params=True,
+ )
for i, source in enumerate(self.sources):
utility.update_source(source, **source_points[i])
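Both synthesizer Ops now hand a `mapping` to `utility.split_point` so that one flat parameter point can be distributed over sources of different types. The function below is a simplified, self-contained stand-in for that idea (not BEAT's actual `split_point`), assuming the mapping ties each variable name to the indices of the sources it belongs to:

```python
def split_point_by_mapping(point, mapping, n_sources_total):
    """Split a flat point {varname: values} into one dict per source."""
    source_points = [{} for _ in range(n_sources_total)]
    for varname, values in point.items():
        for value, source_idx in zip(values, mapping[varname]):
            source_points[source_idx][varname] = value
    return source_points


point = {"east_shift": [0.0, 2.0], "north_shift": [1.0, -1.0], "slip": [3.0]}
mapping = {"east_shift": [0, 1], "north_shift": [0, 1], "slip": [1]}

print(split_point_by_mapping(point, mapping, n_sources_total=2))
# [{'east_shift': 0.0, 'north_shift': 1.0},
#  {'east_shift': 2.0, 'north_shift': -1.0, 'slip': 3.0}]
```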
@@ -450,9 +290,9 @@ def perform(self, node, inputs, output):
pad_to_pow2=True,
)
else:
- ValueError('Domain "%" not supported!' % self.domain)
+ raise ValueError('Domain "%s" not supported!' % self.domain)
- def infer_shape(self, node, input_shapes):
+ def infer_shape(self, fgraph=None, node=None, input_shapes=None):
nrow = len(self.targets)
if self.domain == "time":
@@ -462,8 +302,7 @@ def infer_shape(self, node, input_shapes):
return [(nrow, ncol), (nrow,)]
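All Ops in this module now share the same pattern: `infer_shape` accepts being called without a graph (`fgraph=None, node=None, input_shapes=None`) and `make_node` uses it to allocate correctly shaped output templates. A minimal toy Op sketching that pattern (not one of BEAT's Ops):

```python
import numpy as num
import pytensor.tensor as tt
from pytensor.graph import Apply


class ScaleRows(tt.Op):
    """Toy Op: multiply a fixed (nrow x ncol) matrix by a scalar."""

    __props__ = ("nrow", "ncol")

    def __init__(self, nrow, ncol):
        self.nrow = nrow
        self.ncol = ncol
        self.base = num.ones((nrow, ncol))

    def make_node(self, scale):
        inlist = [tt.as_tensor_variable(scale)]
        # allocate the output template from the statically known shape
        outm_shape = self.infer_shape()[0]
        outm = tt.as_tensor_variable(num.zeros(outm_shape))
        return Apply(self, inlist, [outm.type()])

    def perform(self, node, inputs, output):
        (scale,) = inputs
        output[0][0] = self.base * scale

    def infer_shape(self, fgraph=None, node=None, input_shapes=None):
        return [(self.nrow, self.ncol)]
```

Keeping the relaxed `infer_shape` signature lets the same method serve pytensor's shape inference and the manual call in `make_node`.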
-class PolaritySynthesizer(theano.Op):
-
+class PolaritySynthesizer(tt.Op):
__props__ = ("engine", "source", "pmap", "is_location_fixed", "always_raytrace")
def __init__(self, engine, source, pmap, is_location_fixed, always_raytrace):
@@ -472,6 +311,7 @@ def __init__(self, engine, source, pmap, is_location_fixed, always_raytrace):
self.pmap = pmap
self.is_location_fixed = is_location_fixed
self.always_raytrace = always_raytrace
+ # TODO check that multi-source-type refactoring did not break anything
def __getstate__(self):
self.engine.close_cashed_stores()
@@ -486,16 +326,21 @@ def make_node(self, inputs):
for i in inputs.values():
inlist.append(tt.as_tensor_variable(i))
- out = tt.as_tensor_variable(num.zeros((2)))
- outlist = [out.type()]
- return theano.Apply(self, inlist, outlist)
+ outv_shape = self.infer_shape()[0]
+ outv = tt.as_tensor_variable(num.zeros(outv_shape))
+
+ outlist = [outv.type()]
+ return Apply(self, inlist, outlist)
def perform(self, node, inputs, output):
synths = output[0]
point = {vname: i for vname, i in zip(self.varnames, inputs)}
mpoint = utility.adjust_point_units(point)
- source_points = utility.split_point(mpoint)
+ source_points = utility.split_point(
+ mpoint,
+ n_sources_total=1,
+ )
utility.update_source(self.source, **source_points[self.pmap.config.event_idx])
if not self.is_location_fixed:
@@ -511,11 +356,11 @@ def perform(self, node, inputs, output):
self.source, radiation_weights=self.pmap.get_radiation_weights()
)
- def infer_shape(self, node, input_shapes):
+ def infer_shape(self, fgraph=None, node=None, input_shapes=None):
return [(self.pmap.n_t,)]
-class SeisDataChopper(theano.Op):
+class SeisDataChopper(tt.Op):
"""
Deprecated!
"""
@@ -533,10 +378,11 @@ def make_node(self, *inputs):
for i in inputs:
inlist.append(tt.as_tensor_variable(i))
- outm = tt.as_tensor_variable(num.zeros((2, 2)))
+ outm_shape = self.infer_shape()[0]
+ outm = tt.as_tensor_variable(num.zeros(outm_shape))
outlist = [outm.type()]
- return theano.Apply(self, inlist, outlist)
+ return Apply(self, inlist, outlist)
def perform(self, node, inputs, output):
tmins = inputs[0]
@@ -546,15 +392,15 @@ def perform(self, node, inputs, output):
self.traces, self.arrival_taper, self.filterer, tmins, outmode="array"
)
- def infer_shape(self, node, input_shapes):
+ def infer_shape(self, fgraph=None, node=None, input_shapes=None):
nrow = len(self.traces)
ncol = self.arrival_taper.nsamples(self.sample_rate)
return [(nrow, ncol)]
-class Sweeper(theano.Op):
+class Sweeper(tt.Op):
"""
- Theano Op for C implementation of the fast sweep algorithm.
+ pytensor Op for C implementation of the fast sweep algorithm.
Parameters
----------
@@ -569,7 +415,6 @@ class Sweeper(theano.Op):
__props__ = ("patch_size", "n_patch_dip", "n_patch_strike", "implementation")
def __init__(self, patch_size, n_patch_dip, n_patch_strike, implementation):
-
self.patch_size = num.float64(patch_size)
self.n_patch_dip = n_patch_dip
self.n_patch_strike = n_patch_strike
@@ -580,9 +425,11 @@ def make_node(self, *inputs):
for i in inputs:
inlist.append(tt.as_tensor_variable(i))
- outv = tt.as_tensor_variable(num.zeros((2)))
+ outv_shape = self.infer_shape()[0]
+ outv = tt.as_tensor_variable(num.zeros(outv_shape))
+
outlist = [outv.type()]
- return theano.Apply(self, inlist, outlist)
+ return Apply(self, inlist, outlist)
def perform(self, node, inputs, output):
"""
@@ -643,13 +490,13 @@ def perform(self, node, inputs, output):
logger.debug("Done sweeping!")
- def infer_shape(self, node, input_shapes):
+ def infer_shape(self, fgraph=None, node=None, input_shapes=None):
return [(self.n_patch_dip * self.n_patch_strike,)]
-class EulerPole(theano.Op):
+class EulerPole(tt.Op):
"""
- Theano Op for rotation of geodetic observations around Euler Pole.
+ pytensor Op for rotation of geodetic observations around Euler Pole.
Parameters
----------
@@ -664,7 +511,6 @@ class EulerPole(theano.Op):
__props__ = ("lats", "lons", "data_mask")
def __init__(self, lats, lons, data_mask):
-
self.lats = tuple(lats)
self.lons = tuple(lons)
self.data_mask = tuple(data_mask)
@@ -677,18 +523,18 @@ def make_node(self, inputs):
for k, v in inputs.items():
varname = k.split("_")[-1] # split of dataset naming
- if isinstance(v, FreeRV):
+ if isinstance(v, tt.TensorVariable):
self.varnames.append(varname)
inlist.append(tt.as_tensor_variable(v))
else:
self.fixed_values[varname] = v
- outv = tt.as_tensor_variable(num.zeros((2, 2)))
- outlist = [outv.type()]
- return theano.Apply(self, inlist, outlist)
+ outm_shape = self.infer_shape()[0]
+ outm = tt.as_tensor_variable(num.zeros(outm_shape))
+ outlist = [outm.type()]
+ return Apply(self, inlist, outlist)
def perform(self, node, inputs, output):
-
z = output[0]
point = {vname: i for vname, i in zip(self.varnames, inputs)}
point.update(self.fixed_values)
@@ -706,13 +552,13 @@ def perform(self, node, inputs, output):
z[0] = velocities
- def infer_shape(self, node, input_shapes):
+ def infer_shape(self, fgraph=None, node=None, input_shapes=None):
return [(len(self.lats), 3)]
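pymc v5 dropped the `FreeRV` wrapper, so the geodetic correction Ops now detect sampled parameters simply as pytensor `TensorVariable`s and treat everything else as fixed. A small self-contained sketch of that split (names are illustrative):

```python
import pytensor.tensor as tt


def split_inputs(inputs):
    """Separate sampled (tensor) inputs from fixed numerical values."""
    varnames, fixed_values, inlist = [], {}, []
    for name, value in inputs.items():
        if isinstance(value, tt.TensorVariable):
            varnames.append(name)
            inlist.append(tt.as_tensor_variable(value))
        else:
            fixed_values[name] = value
    return varnames, fixed_values, inlist


inputs = {"pole_lat": tt.dscalar("pole_lat"), "pole_lon": 12.5}
varnames, fixed_values, _ = split_inputs(inputs)
print(varnames, fixed_values)  # ['pole_lat'] {'pole_lon': 12.5}
```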
-class StrainRateTensor(theano.Op):
+class StrainRateTensor(tt.Op):
"""
- TheanoOp for internal block deformation through 2d area strain rate tensor.
+ pytensor Op for internal block deformation through 2d area strain rate tensor.
Parameters
----------
@@ -727,20 +573,11 @@ class StrainRateTensor(theano.Op):
__props__ = ("lats", "lons", "data_mask")
def __init__(self, lats, lons, data_mask):
-
self.lats = tuple(lats)
self.lons = tuple(lons)
self.data_mask = tuple(data_mask)
self.ndata = len(self.lats)
- station_idxs = [
- station_idx
- for station_idx in range(self.ndata)
- if station_idx not in data_mask
- ]
-
- self.station_idxs = tuple(station_idxs)
-
def make_node(self, inputs):
inlist = []
@@ -749,18 +586,19 @@ def make_node(self, inputs):
for k, v in inputs.items():
varname = k.split("_")[-1] # split of dataset naming
- if isinstance(v, FreeRV):
+ if isinstance(v, tt.TensorVariable):
self.varnames.append(varname)
inlist.append(tt.as_tensor_variable(v))
else:
self.fixed_values[varname] = v
- outv = tt.as_tensor_variable(num.zeros((2, 2)))
- outlist = [outv.type()]
- return theano.Apply(self, inlist, outlist)
+ outm_shape = self.infer_shape()[0]
- def perform(self, node, inputs, output):
+ outm = tt.as_tensor_variable(num.zeros(outm_shape))
+ outlist = [outm.type()]
+ return Apply(self, inlist, outlist)
+ def perform(self, node, inputs, output):
z = output[0]
point = {vname: i for vname, i in zip(self.varnames, inputs)}
@@ -771,20 +609,19 @@ def perform(self, node, inputs, output):
exy = point["exy"]
rotation = point["rotation"]
- valid = num.array(self.station_idxs)
-
v_xyz = heart.velocities_from_strain_rate_tensor(
- num.array(self.lats)[valid],
- num.array(self.lons)[valid],
+ num.array(self.lats),
+ num.array(self.lons),
exx=exx,
eyy=eyy,
exy=exy,
rotation=rotation,
)
- v_xyz_all = num.zeros((self.ndata, 3))
- v_xyz_all[valid, :] = v_xyz
- z[0] = v_xyz_all
+ if self.data_mask:
+ v_xyz[num.array(self.data_mask), :] = 0.0
+
+ z[0] = v_xyz
- def infer_shape(self, node, input_shapes):
+ def infer_shape(self, fgraph=None, node=None, input_shapes=None):
return [(self.ndata, 3)]
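The strain-rate Op no longer pre-selects unmasked stations; velocities are computed for every station and the masked rows are zeroed afterwards. In plain numpy the new masking step amounts to:

```python
import numpy as num

v_xyz = num.arange(12, dtype=float).reshape(4, 3)  # stand-in for predicted velocities
data_mask = (1, 3)                                  # station indices to be ignored

if data_mask:
    v_xyz[num.array(data_mask), :] = 0.0

print(v_xyz)  # rows 1 and 3 are zero, shape stays (ndata, 3)
```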
diff --git a/beat/sampler/base.py b/beat/sampler/base.py
index 6e184f67..427bc5bb 100644
--- a/beat/sampler/base.py
+++ b/beat/sampler/base.py
@@ -12,11 +12,9 @@
standard_cauchy,
standard_exponential,
)
-from pymc3 import CompoundStep
-from pymc3.model import Point, modelcontext
-from pymc3.sampling import stop_tuning
-from pymc3.theanof import join_nonshared_inputs
-from theano import function
+from pymc import CompoundStep
+from pymc.model import Point
+from pymc.pytensorf import compile_pymc, join_nonshared_inputs
from tqdm import tqdm
from beat import parallel
@@ -75,7 +73,7 @@ def multivariate_t_rvs(mean, cov, df=np.inf, size=1):
class Proposal(object):
"""
- Proposal distributions modified from pymc3 to initially create all the
+ Proposal distributions modified from pymc to initially create all the
Proposal steps without repeated execution of the RNG- significant speedup!
Parameters
@@ -220,9 +218,10 @@ def choose_proposal(proposal_name, **kwargs):
Returns
-------
- class:`pymc3.Proposal` Object
+ class:`pymc.Proposal` Object
"""
- return proposal_distributions[proposal_name](**kwargs)
+ proposal = proposal_distributions[proposal_name](**kwargs)
+ return proposal
def setup_chain_counter(n_chains, n_jobs):
@@ -232,7 +231,6 @@ def setup_chain_counter(n_chains, n_jobs):
class ChainCounter(object):
def __init__(self, n, n_jobs, perc_disp=0.2, subject="chains"):
-
n_chains_worker = n // n_jobs
frac_disp = int(np.ceil(n_chains_worker * perc_disp))
@@ -261,19 +259,21 @@ def __call__(self, i):
def _sample(
draws,
- step=None,
start=None,
trace=None,
chain=0,
tune=None,
progressbar=True,
- model=None,
random_seed=-1,
):
+ n = parallel.get_process_id()
+ logger.debug("Worker %i deserialises step", n)
+ # step = cloudpickle.loads(step_method_pickled)
+ step = step_method_global
shared_params = [
sparam
- for sparam in step.logp_forw.get_shared()
+ for sparam in step.logp_forw_func.get_shared()
if sparam.name in parallel._tobememshared
]
@@ -281,9 +281,7 @@ def _sample(
logger.debug("Accessing shared memory")
parallel.borrow_all_memories(shared_params, parallel._shared_memory)
- sampling = _iter_sample(draws, step, start, trace, chain, tune, model, random_seed)
-
- n = parallel.get_process_id()
+ sampling = _iter_sample(draws, step, start, trace, chain, tune, random_seed)
if progressbar:
sampling = tqdm(
@@ -322,7 +320,6 @@ def _iter_sample(
trace=None,
chain=0,
tune=None,
- model=None,
random_seed=-1,
overwrite=True,
update_proposal=False,
@@ -335,8 +332,6 @@ def _iter_sample(
adaptiv step-size scaling is stopped after this chain sample
"""
- model = modelcontext(model)
-
draws = int(draws)
if draws < 1:
@@ -353,18 +348,18 @@ def _iter_sample(
except TypeError:
pass
- point = Point(start, model=model)
+ point = Point(start)
step.chain_index = chain
trace.setup(draws, chain, overwrite=overwrite)
for i in range(draws):
- if i == tune:
- step = stop_tuning(step)
+ if i == tune: # stop tuning
+ step.tune = False
logger.debug("Step: Chain_%i step_%i" % (chain, i))
point, out_list = step.step(point)
-
+ # print("before buffer", out_list, point)
try:
trace.buffer_write(out_list, step.cumulative_samples)
except BufferError: # buffer full
@@ -423,6 +418,13 @@ def init_chain_hypers(problem):
problem.update_llks(point)
+def set_global_step_method(step):
+ global step_method_global
+ logger.debug("Setting global step method")
+ # step_method_pickled = cloudpickle.dumps(step, protocol=-1)
+ step_method_global = step
+
+
def iter_parallel_chains(
draws,
step,
@@ -454,7 +456,7 @@ def iter_parallel_chains(
with absolute path to the directory where to store the sampling results
progressbar : boolean
flag for displaying a progressbar
- model : :class:`pymc3.model.Model` instance
+ model : :class:`pymc.model.Model` instance
holds definition of the forward problem
n_jobs : int
number of jobs to run in parallel, must not be higher than the
@@ -516,13 +518,11 @@ def iter_parallel_chains(
work = [
(
draws,
- step,
step.population[step.resampling_indexes[chain]],
trace,
chain,
None,
progressbar,
- model,
rseed,
)
for chain, rseed, trace in zip(chains, random_seeds, trace_list)
@@ -541,7 +541,7 @@ def iter_parallel_chains(
if n_jobs > 1 and True:
shared_params = [
sparam
- for sparam in step.logp_forw.get_shared()
+ for sparam in step.logp_forw_func.get_shared()
if sparam.name in parallel._tobememshared
]
@@ -566,8 +566,8 @@ def iter_parallel_chains(
chunksize=chunksize,
timeout=timeout,
nprocs=n_jobs,
- initializer=initializer,
- initargs=initargs,
+ initializer=set_global_step_method,
+ initargs=[step],
)
logger.info("Sampling ...")
@@ -595,21 +595,22 @@ def iter_parallel_chains(
return mtrace
-def logp_forw(out_vars, vars, shared):
+def logp_forw(point, out_vars, in_vars, shared):
"""
- Compile Theano function of the model and the input and output variables.
+ Compile Pytensor function of the model and the input and output variables.
Parameters
----------
out_vars : List
- containing :class:`pymc3.Distribution` for the output variables
- vars : List
- containing :class:`pymc3.Distribution` for the input variables
+ containing :class:`pymc.Distribution` for the output variables
+ in_vars : List
+ containing :class:`pymc.Distribution` for the input variables
shared : List
- containing :class:`theano.tensor.Tensor` for dependent shared data
+ containing :class:`pytensor.tensor.Tensor` for dependent shared data
"""
- out_list, inarray0 = join_nonshared_inputs(out_vars, vars, shared)
- f = function([inarray0], out_list)
+ logger.debug("Compiling PyTensor function")
+ out_list, inarray0 = join_nonshared_inputs(point, out_vars, in_vars, shared)
+ f = compile_pymc([inarray0], out_list) # , on_unused_input="ignore")
f.trust_input = True
return f
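For reference, the reworked `logp_forw` leans on the pymc v5 helpers: `join_nonshared_inputs` now takes the initial point first, and `compile_pymc` replaces the bare `pytensor.function` call. A hedged, self-contained sketch with a toy one-parameter model (not BEAT's model graph):

```python
import numpy as num
import pymc as pm
from pymc.pytensorf import compile_pymc, join_nonshared_inputs, make_shared_replacements

with pm.Model() as model:
    pm.Normal("x", mu=0.0, sigma=1.0)

point = model.initial_point()
value_vars = model.value_vars
shared = make_shared_replacements(point, value_vars, model)

# compile the prior log-probability as a function of one flat input array
out_list, inarray0 = join_nonshared_inputs(point, [model.varlogp], value_vars, shared)
logp = compile_pymc([inarray0], out_list)
logp.trust_input = True

print(logp(num.zeros(1)))  # ≈ [-0.9189], the standard normal logp at 0
```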
diff --git a/beat/sampler/distributed.py b/beat/sampler/distributed.py
index 202f8571..9f9aa32c 100644
--- a/beat/sampler/distributed.py
+++ b/beat/sampler/distributed.py
@@ -73,7 +73,6 @@ def __init__(self, tmp=None, keep_tmp=False, py_version=None):
logger.info("Done initialising mpi runner")
def run(self, script_path, n_jobs=None, loglevel="info", project_dir=""):
-
if n_jobs is None:
raise ValueError("n_jobs has to be defined!")
@@ -164,11 +163,11 @@ def signal_handler(signum, frame):
def __del__(self):
if self.tempdir:
if not self.keep_tmp:
- logger.debug('removing temporary directory under: "%s"' % self.tempdir)
+ logger.info('removing temporary directory under: "%s"' % self.tempdir)
shutil.rmtree(self.tempdir)
self.tempdir = None
else:
- logger.warning("not removing temporary directory: %s" % self.tempdir)
+ logger.info("not removing temporary directory: %s" % self.tempdir)
samplers = {"pt": "beat/sampler/pt.py"}
@@ -189,7 +188,7 @@ def run_mpi_sampler(
----------
sampler_name : string
valid names see distributed.samplers for available options
- model : :class:`pymc3.model.Model`
+ model : :class:`pymc.model.Model`
that holds the forward model graph
sampler_args : list
of sampler arguments, order is important
diff --git a/beat/sampler/metropolis.py b/beat/sampler/metropolis.py
index 033ba250..1ae8456b 100644
--- a/beat/sampler/metropolis.py
+++ b/beat/sampler/metropolis.py
@@ -1,25 +1,25 @@
"""
-Metropolis algorithm module, wrapping the pymc3 implementation.
+Metropolis algorithm module, wrapping the pymc implementation.
Provides the possibility to update the involved covariance matrixes within
the course of sampling the chain.
"""
import logging
-import os
-import shutil
-from copy import deepcopy
+import warnings
from time import time
import numpy as num
-from pymc3.backends import text
-from pymc3.model import Point, modelcontext
-from pymc3.step_methods.metropolis import metrop_select
-from pymc3.step_methods.metropolis import tune as step_tune
-from pymc3.theanof import inputvars, make_shared_replacements
-from pymc3.vartypes import discrete_types
+from pymc.model import Point, modelcontext
+from pymc.pytensorf import inputvars, make_shared_replacements
+from pymc.sampling import sample_prior_predictive
+
+# from pymc.smc.kernels import _logp_forw
+from pymc.step_methods.metropolis import metrop_select
+from pymc.step_methods.metropolis import tune as step_tune
+from pymc.vartypes import discrete_types
from pyrocko import util
-from theano import config as tconfig
+from pytensor import config as tconfig
from beat import backend, utility
from beat.covariance import init_proposal_covariance
@@ -33,7 +33,7 @@
update_last_samples,
)
-__all__ = ["metropolis_sample", "get_trace_stats", "get_final_stage", "Metropolis"]
+__all__ = ["metropolis_sample", "get_trace_stats", "Metropolis"]
logger = logging.getLogger("metropolis")
@@ -45,34 +45,25 @@ class Metropolis(backend.ArrayStepSharedLLK):
Parameters
----------
- vars : list
+ value_vars : list
List of variables for sampler
- out_vars : list
- List of output variables for trace recording. If empty unobserved_RVs
- are taken.
n_chains : int
Number of chains per stage has to be a large number
of number of n_jobs (processors to be used) on the machine.
scaling : float
Factor applied to the proposal distribution i.e. the step size of the
Markov Chain
- covariance : :class:`numpy.ndarray`
- (n_chains x n_chains) for MutlivariateNormal, otherwise (n_chains)
- Initial Covariance matrix for proposal distribution,
- if None - identity matrix taken
likelihood_name : string
- name of the :class:`pymc3.determinsitic` variable that contains the
+ name of the :class:`pymc.Deterministic` variable that contains the
model likelihood - defaults to 'like'
backend : str
type of backend to use for sample results storage, for alternatives
see :class:`backend.backend:catalog`
proposal_dist :
- :class:`pymc3.metropolis.Proposal`
- Type of proposal distribution, see
- :mod:`pymc3.step_methods.metropolis` for options
+ :class:`beat.sampler.base.Proposal` Type of proposal distribution
tune : boolean
Flag for adaptive scaling based on the acceptance rate
- model : :class:`pymc3.Model`
+ model : :class:`pymc.model.Model`
Optional model for sampling step.
Defaults to None (taken from context).
"""
@@ -81,9 +72,7 @@ class Metropolis(backend.ArrayStepSharedLLK):
def __init__(
self,
- vars=None,
- out_vars=None,
- covariance=None,
+ value_vars=None,
scale=1.0,
n_chains=100,
tune=True,
@@ -93,22 +82,19 @@ def __init__(
likelihood_name="like",
backend="csv",
proposal_name="MultivariateNormal",
- **kwargs
+ **kwargs,
):
-
model = modelcontext(model)
+ self.likelihood_name = likelihood_name
+ self.proposal_name = proposal_name
+ self.population = None
- if vars is None:
- vars = model.vars
-
- vars = inputvars(vars)
-
- if out_vars is None:
- out_vars = model.unobserved_RVs
+ if value_vars is None:
+ value_vars = model.value_vars
- out_varnames = [out_var.name for out_var in out_vars]
+ self.value_vars = inputvars(value_vars)
- self.scaling = utility.scalar2floatX(num.atleast_1d(scale))
+ self.scaling = utility.scalar2floatX(scale)
self.tune = tune
self.check_bound = check_bound
@@ -126,52 +112,105 @@ def __init__(
# needed to use the same parallel implementation function as for SMC
self.resampling_indexes = num.arange(n_chains)
self.n_chains = n_chains
-
- self.likelihood_name = likelihood_name
- self._llk_index = out_varnames.index(likelihood_name)
self.backend = backend
- self.discrete = num.concatenate(
- [[v.dtype in discrete_types] * (v.dsize or 1) for v in vars]
- )
- self.any_discrete = self.discrete.any()
- self.all_discrete = self.discrete.all()
- # create initial population
- self.population = []
- self.array_population = num.zeros(n_chains)
+ # initial point comes in reversed order compared to value_vars
+ # rearrange to order of value_vars
+ init_point = model.initial_point()
+ self.test_point = {
+ val_var.name: init_point[val_var.name] for val_var in self.value_vars
+ }
+
+ self.initialize_population(model)
+ self.compile_model_graph(model)
+ self.initialize_proposal(model)
+
+ def initialize_population(self, model):
+ # create initial population from prior
logger.info(
- "Creating initial population for {}" " chains ...".format(self.n_chains)
+ "Creating initial population for {} chains ...".format(self.n_chains)
)
+ with warnings.catch_warnings():
+ warnings.filterwarnings(
+ "ignore", category=UserWarning, message="The effect of Potentials"
+ )
+ var_names = [value_var.name for value_var in self.value_vars]
+ prior_draws = sample_prior_predictive(
+ samples=self.n_chains,
+ var_names=var_names,
+ model=model,
+ return_inferencedata=False,
+ )
+
+ self.array_population = num.zeros(self.n_chains)
+ self.population = []
for i in range(self.n_chains):
self.population.append(
- Point({v.name: v.random() for v in vars}, model=model)
+ Point({v_name: prior_draws[v_name][i] for v_name in var_names})
)
- self.population[0] = model.test_point
+ self.population[0] = self.test_point
+
+ def compile_model_graph(self, model):
+ logger.info("Compiling model graph ...")
+ shared = make_shared_replacements(self.test_point, self.value_vars, model)
+
+ # collect all RVs and deterministics to write to file
+ # out_vars = model.deterministics
+ out_vars = model.unobserved_RVs
+ out_varnames = [out_var.name for out_var in out_vars]
+ self._llk_index = out_varnames.index(self.likelihood_name)
+
+ # plot modelgraph
+ # model_to_graphviz(model).view()
+
+ in_rvs = [model.values_to_rvs[val_var] for val_var in self.value_vars]
+
+ self.logp_forw_func = logp_forw(
+ point=self.test_point,
+ out_vars=out_vars,
+ in_vars=in_rvs, # values of dists
+ shared=shared,
+ )
+
+ self.prior_logp_func = logp_forw(
+ point=self.test_point,
+ out_vars=[model.varlogp],
+ in_vars=self.value_vars, # logp of dists
+ shared=shared,
+ )
- shared = make_shared_replacements(vars, model)
- self.logp_forw = logp_forw(out_vars, vars, shared)
- self.check_bnd = logp_forw([model.varlogpt], vars, shared)
+ # determine if there are discrete variables
+ self.discrete = num.concatenate(
+ [
+ num.atleast_1d([v.dtype in discrete_types] * (v.size or 1))
+ for v in self.test_point.values()
+ ]
+ )
+ self.any_discrete = self.discrete.any()
+ self.all_discrete = self.discrete.all()
- super(Metropolis, self).__init__(vars, out_vars, shared)
+ super(Metropolis, self).__init__(self.value_vars, out_vars, shared)
+ def initialize_proposal(self, model):
# init proposal
- if covariance is None and proposal_name in multivariate_proposals:
+ logger.info("Initializing proposal distribution ...%s", self.proposal_name)
+ if self.proposal_name in multivariate_proposals:
+ if self.population is None:
+ raise ValueError("Sampler population needs to be initialised first!")
+
t0 = time()
self.covariance = init_proposal_covariance(
- bij=self.bij, vars=vars, model=model, pop_size=1000
+ bij=self.bij, population=self.population
)
t1 = time()
logger.info("Time for proposal covariance init: %f" % (t1 - t0))
scale = self.covariance
- elif covariance is None:
- scale = num.ones(sum(v.dsize for v in vars))
else:
- scale = covariance
+ scale = num.ones(sum(v.size for v in self.test_point.values()))
- self.proposal_name = proposal_name
self.proposal_dist = choose_proposal(self.proposal_name, scale=scale)
- self.proposal_samples_array = self.proposal_dist(n_chains)
+ self.proposal_samples_array = self.proposal_dist(self.n_chains)
self.chain_previous_lpoint = [[]] * self.n_chains
self._tps = None
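Chain start points are now drawn with pymc's prior predictive sampler instead of the removed per-variable `.random()` calls (the real code additionally silences the "effect of Potentials" UserWarning). A hedged sketch with a toy two-parameter prior; the `samples=` keyword matches the call above, while newer pymc releases name it `draws=`:

```python
import pymc as pm

with pm.Model() as model:
    pm.Uniform("depth", lower=0.0, upper=10.0)
    pm.Normal("strike", mu=180.0, sigma=30.0)

n_chains = 5
var_names = ["depth", "strike"]

prior_draws = pm.sample_prior_predictive(
    samples=n_chains,
    var_names=var_names,
    model=model,
    return_inferencedata=False,
)

# one starting point per chain, keyed by variable name
population = [
    {name: prior_draws[name][i] for name in var_names} for i in range(n_chains)
]
print(len(population), sorted(population[0]))  # 5 ['depth', 'strike']
```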
@@ -182,11 +221,10 @@ def _sampler_state_blacklist(self):
"""
bl = [
"check_bnd",
- "logp_forw",
+ "logp_forw_func",
"proposal_samples_array",
- "vars",
+ "value_vars",
"bij",
- "lij",
"ordering",
"lordering",
"_BlockedStep__newargs",
@@ -224,7 +262,7 @@ def time_per_sample(self, n_points=10):
for i in range(n_points):
q = self.bij.map(self.population[i])
t0 = time()
- self.logp_forw(q)
+ self.logp_forw_func(q.data)
t1 = time()
tps[i] = t1 - t0
self._tps = tps.mean()
@@ -232,7 +270,7 @@ def time_per_sample(self, n_points=10):
def astep(self, q0):
if self.stage == 0:
- l_new = self.logp_forw(q0)
+ l_new = self.logp_forw_func(q0)
if not num.isfinite(l_new[self._llk_index]):
raise ValueError(
"Got NaN in likelihood evaluation! "
@@ -279,14 +317,13 @@ def astep(self, q0):
q = q0 + delta
q = q[self.discrete].astype(int)
else:
-
q = q0 + delta
try:
l0 = self.chain_previous_lpoint[self.chain_index]
llk0 = l0[self._llk_index]
except IndexError:
- l0 = self.logp_forw(q0)
+ l0 = self.logp_forw_func(q0)
self.chain_previous_lpoint[self.chain_index] = l0
llk0 = l0[self._llk_index]
@@ -295,26 +332,27 @@ def astep(self, q0):
"Checking bound: Chain_%i step_%i"
% (self.chain_index, self.stage_sample)
)
- varlogp = self.check_bnd(q)
-
- if num.isfinite(varlogp):
+ # print("before prior test", q)
+ priorlogp = self.prior_logp_func(q)
+ # print("prior", priorlogp)
+ if num.isfinite(priorlogp):
logger.debug(
"Calc llk: Chain_%i step_%i"
% (self.chain_index, self.stage_sample)
)
-
- lp = self.logp_forw(q)
-
+ # print("previous sample", q0)
+ lp = self.logp_forw_func(q)
logger.debug(
"Select llk: Chain_%i step_%i"
% (self.chain_index, self.stage_sample)
)
-
+ # print("current sample", q)
tempered_llk_ratio = self.beta * (
lp[self._llk_index] - l0[self._llk_index]
)
q_new, accepted = metrop_select(tempered_llk_ratio, q, q0)
-
+ # print("accepted:", q_new)
+ # print("-----------------------------------")
if accepted:
logger.debug(
"Accepted: Chain_%i step_%i"
@@ -345,7 +383,7 @@ def astep(self, q0):
"Calc llk: Chain_%i step_%i" % (self.chain_index, self.stage_sample)
)
- lp = self.logp_forw(q)
+ lp = self.logp_forw_func(q)
logger.debug(
"Select: Chain_%i step_%i" % (self.chain_index, self.stage_sample)
@@ -379,34 +417,6 @@ def astep(self, q0):
return q_new, l_new
-def get_final_stage(homepath, n_stages, model):
- """
- Combine Metropolis results into final stage to get one single chain for
- plotting results.
- """
-
- util.ensuredir(homepath)
-
- mtraces = []
- for stage in range(n_stages):
- logger.info("Loading Metropolis stage %i" % stage)
- stage_outpath = os.path.join(homepath, "stage_%i" % stage)
-
- mtraces.append(backend.load(name=stage_outpath, model=model))
-
- ctrace = backend.concatenate_traces(mtraces)
- outname = os.path.join(homepath, "stage_final")
-
- if os.path.exists(outname):
- logger.info("Removing existing previous final stage!")
- shutil.rmtree(outname)
-
- util.ensuredir(outname)
- logger.info("Creating final Metropolis stage")
-
- text.dump(name=outname, trace=ctrace)
-
-
def metropolis_sample(
n_steps=10000,
homepath=None,
@@ -474,7 +484,6 @@ def metropolis_sample(
)
with model:
-
chains = stage_handler.clean_directory(step.stage, chains, rm_flag)
logger.info("Sampling stage ...")
@@ -516,7 +525,7 @@ def metropolis_sample(
update.engine.close_cashed_stores()
outparam_list = [step.get_sampler_state(), update]
- stage_handler.dump_atmip_params(step.stage, outparam_list)
+ stage_handler.dump_smc_params(step.stage, outparam_list)
def get_trace_stats(mtrace, step, burn=0.5, thin=2, n_jobs=1):
@@ -525,7 +534,7 @@ def get_trace_stats(mtrace, step, burn=0.5, thin=2, n_jobs=1):
Parameters
----------
- mtrace : :class:`pymc3.backends.base.MultiTrace`
+ mtrace : :class:`pymc.backends.base.MultiTrace`
Multitrace sampling result
step : initialised :class:`smc.SMC` sampler object
burn : float
diff --git a/beat/sampler/pt.py b/beat/sampler/pt.py
index 1a740e40..0aeaa320 100644
--- a/beat/sampler/pt.py
+++ b/beat/sampler/pt.py
@@ -8,32 +8,22 @@
# disable internal(fine) blas parallelisation as we parallelise over chains
os.environ["OMP_NUM_THREADS"] = "1"
-from collections import OrderedDict
-from copy import deepcopy
-from logging import getLevelName, getLogger
-from pickle import HIGHEST_PROTOCOL
-
-import numpy as num
-from mpi4py import MPI
-from theano import config as tconfig
-
-from beat.backend import MemoryChain, SampleStage, backend_catalog
-from beat.config import sample_p_outname
-from beat.sampler import distributed
-from beat.sampler.base import (
- ChainCounter,
- Proposal,
- _iter_sample,
- choose_proposal,
- multivariate_proposals,
-)
-from beat.utility import dump_objects, list2string, load_objects, setup_logging
-
-logger = getLogger("pt")
+if True: # noqa: E402
+ from collections import OrderedDict
+ from copy import deepcopy
+ from logging import getLevelName, getLogger
+ from pickle import HIGHEST_PROTOCOL
+ import numpy as num
+ from pytensor import config as tconfig
-MPI.pickle.PROTOCOL = HIGHEST_PROTOCOL
+ from beat.backend import MemoryChain, SampleStage, backend_catalog
+ from beat.config import sample_p_outname
+ from beat.sampler import distributed
+ from beat.sampler.base import ChainCounter, Proposal, _iter_sample, choose_proposal
+ from beat.utility import dump_objects, list2string, load_objects, setup_logging
+logger = getLogger("pt")
__all__ = [
"pt_sample",
@@ -92,7 +82,6 @@ def __init__(self):
self.filename = sample_p_outname
def record(self, sample_count, acceptance_matrix, t_scale, acceptance):
-
self.sample_counts.append(sample_count)
self.acceptance_matrixes.append(acceptance_matrix)
self.acceptance.append(acceptance)
@@ -129,7 +118,6 @@ def __init__(
beta_tune_interval,
n_workers_posterior,
):
-
self.n_workers = n_workers
self.n_workers_posterior = n_workers_posterior
self.n_workers_tempered = int(self.n_workers - self.n_workers_posterior)
@@ -400,7 +388,6 @@ def get_package(self, source, trace=None, resample=False, burnin=1000):
"""
if source not in self._worker_package_mapping.keys():
-
step = deepcopy(self.step)
step.beta = self.betas[self.worker2index(source)] # subtract master
step.stage = 1
@@ -564,7 +551,7 @@ def master_process(
logger.info("Sending work packages to workers...")
manager.update_betas()
for beta in manager.betas:
- comm.recv(source=MPI.ANY_SOURCE, tag=tags.READY, status=status)
+ comm.recv(source=MPI.ANY_SOURCE, tag=tags.READY, status=status) # noqa: F821
source = status.Get_source()
if record_worker_chains:
@@ -595,17 +582,22 @@ def master_process(
logger.info("Sampling ...")
logger.info("------------")
while True:
-
m1 = num.empty(manager.step.lordering.size)
comm.Recv(
- [m1, MPI.DOUBLE], source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status
+ [m1, MPI.DOUBLE], # noqa: F821
+ source=MPI.ANY_SOURCE, # noqa: F821
+ tag=MPI.ANY_TAG, # noqa: F821
+ status=status, # noqa: F821
)
source1 = status.Get_source()
logger.debug("Got sample 1 from worker %i" % source1)
m2 = num.empty(manager.step.lordering.size)
comm.Recv(
- [m2, MPI.DOUBLE], source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status
+ [m2, MPI.DOUBLE], # noqa: F821
+ source=MPI.ANY_SOURCE, # noqa: F821
+ tag=MPI.ANY_TAG, # noqa: F821
+ status=status, # noqa: F821
)
source2 = status.Get_source()
logger.debug("Got sample 2 from worker %i" % source2)
@@ -620,6 +612,7 @@ def master_process(
steps_until_tune += 1
m1, m2 = manager.propose_chain_swap(m1, m2, source1, source2)
+
# beta updating
if steps_until_tune >= beta_tune_interval:
manager.tune_betas()
@@ -630,7 +623,7 @@ def master_process(
for source in [source1, source2]:
if not manager.worker_beta_updated(source1):
comm.Send(
- [manager.get_beta(source), MPI.DOUBLE],
+ [manager.get_beta(source), MPI.DOUBLE], # noqa: F821
dest=source,
tag=tags.BETA,
)
@@ -666,7 +659,7 @@ def worker_process(comm, tags, status):
tags : message tags
status : mpi.status object
"""
- name = MPI.Get_processor_name()
+ name = MPI.Get_processor_name() # noqa: F821
logger.debug("Entering worker process with rank %d on %s." % (comm.rank, name))
comm.send(None, dest=0, tag=tags.READY)
@@ -681,13 +674,13 @@ def worker_process(comm, tags, status):
# do initial sampling
result = sample_pt_chain(**kwargs)
- comm.Send([result, MPI.DOUBLE], dest=0, tag=tags.DONE)
+ comm.Send([result, MPI.DOUBLE], dest=0, tag=tags.DONE) # noqa: F821
# enter repeated sampling
while True:
# TODO: make transd-compatible
data = num.empty(step.lordering.size, dtype=tconfig.floatX)
- comm.Recv([data, MPI.DOUBLE], tag=MPI.ANY_TAG, source=0, status=status)
+ comm.Recv([data, MPI.DOUBLE], tag=MPI.ANY_TAG, source=0, status=status) # noqa: F821
tag = status.Get_tag()
if tag == tags.SAMPLE:
@@ -699,7 +692,7 @@ def worker_process(comm, tags, status):
result = sample_pt_chain(**kwargs)
logger.debug("Worker %i attempting to send ..." % comm.rank)
- comm.Send([result, MPI.DOUBLE], dest=0, tag=tags.DONE)
+ comm.Send([result, MPI.DOUBLE], dest=0, tag=tags.DONE) # noqa: F821
logger.debug("Worker %i sent message successfully ..." % comm.rank)
elif tag == tags.BETA:
@@ -748,7 +741,7 @@ def sample_pt_chain(
Number of iterations to tune, if applicable (defaults to None)
progressbar : bool
Flag for displaying a progress bar
- model : :class:`pymc3.Model`
+ model : :class:`pymc.Model`
(optional if in `with` context) has to contain deterministic
variable name defined under step.likelihood_name' that contains the
model likelihood
@@ -758,7 +751,7 @@ def sample_pt_chain(
:class:`numpy.NdArray` with end-point of the MarkovChain
"""
if isinstance(draws, Proposal):
- n_steps = int(draws())
+ n_steps = draws()[0]
else:
n_steps = draws
@@ -776,7 +769,6 @@ def sample_pt_chain(
trace,
chain,
tune,
- model,
random_seed,
overwrite=False,
update_proposal=update_proposal,
@@ -855,7 +847,7 @@ def pt_sample(
buffer_thinning : int
every nth sample of the buffer is written to disk,
default: 1 (no thinning)
- model : :class:`pymc3.Model`
+ model : :class:`pymc.Model`
(optional if in `with` context) has to contain deterministic
variable name defined under step.likelihood_name' that contains the
model likelihood
@@ -943,6 +935,17 @@ def _sample():
if __name__ == "__main__":
+ import cloudpickle
+
+ try:
+ from mpi4py import MPI
+
+ logger.debug("Found MPI")
+ except ImportError:
+        raise ImportError("'mpi4py' and an MPI library need to be installed!")
+
+ MPI.pickle.__init__(cloudpickle.dumps, cloudpickle.loads)
+ MPI.pickle.PROTOCOL = HIGHEST_PROTOCOL
try:
_, levelname, project_dir = sys.argv
@@ -953,4 +956,5 @@ def _sample():
setup_logging(
project_dir=project_dir, levelname=levelname, logfilename="BEAT_log.txt"
)
+
_sample()
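The __main__ block above now registers cloudpickle as the serializer for python objects sent over MPI. A minimal standalone sketch of the same pattern, assuming mpi4py and cloudpickle are installed (run e.g. with mpirun -n 2 python sketch.py)::

    import cloudpickle
    from pickle import HIGHEST_PROTOCOL

    from mpi4py import MPI

    # route MPI's python-object (de)serialization through cloudpickle, which
    # also handles objects that plain pickle cannot (e.g. closures)
    MPI.pickle.__init__(cloudpickle.dumps, cloudpickle.loads)
    MPI.pickle.PROTOCOL = HIGHEST_PROTOCOL

    comm = MPI.COMM_WORLD
    print("worker %i of %i ready" % (comm.rank, comm.size))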
diff --git a/beat/sampler/smc.py b/beat/sampler/smc.py
index 3a29f7de..2a28f3f3 100644
--- a/beat/sampler/smc.py
+++ b/beat/sampler/smc.py
@@ -1,13 +1,14 @@
"""
Sequential Monte Carlo Sampler module;
-Runs on any pymc3 model.
+Runs on any pymc model.
"""
import logging
import numpy as np
-from pymc3.model import modelcontext
+from pymc.blocking import RaveledVars
+from pymc.model import modelcontext
from beat import backend, utility
@@ -39,17 +40,13 @@ class SMC(Metropolis):
scaling : float
Factor applied to the proposal distribution i.e. the step size of the
Markov Chain
- covariance : :class:`numpy.ndarray`
- (n_chains x n_chains) for MutlivariateNormal, otherwise (n_chains)
- Initial Covariance matrix for proposal distribution,
- if None - identity matrix taken
likelihood_name : string
- name of the :class:`pymc3.determinsitic` variable that contains the
+        name of the :class:`pymc.deterministic` variable that contains the
model likelihood - defaults to 'like'
proposal_dist :
- :class:`pymc3.metropolis.Proposal`
+ :class:`pymc.metropolis.Proposal`
Type of proposal distribution, see
- :mod:`pymc3.step_methods.metropolis` for options
+ :mod:`pymc.step_methods.metropolis` for options
tune : boolean
Flag for adaptive scaling based on the acceptance rate
coef_variation : scalar, float
@@ -61,7 +58,7 @@ class SMC(Metropolis):
Check if current sample lies outside of variable definition
speeds up computation as the forward model won't be executed
default: True
- model : :class:`pymc3.Model`
+ model : :class:`pymc.Model`
Optional model for sampling step.
Defaults to None (taken from context).
backend : str
@@ -84,7 +81,6 @@ def __init__(
self,
vars=None,
out_vars=None,
- covariance=None,
scale=1.0,
n_chains=100,
tune=True,
@@ -95,13 +91,11 @@ def __init__(
proposal_name="MultivariateNormal",
backend="csv",
coef_variation=1.0,
- **kwargs
+ **kwargs,
):
-
super(SMC, self).__init__(
vars=vars,
out_vars=out_vars,
- covariance=covariance,
scale=scale,
n_chains=n_chains,
tune=tune,
@@ -111,7 +105,7 @@ def __init__(
likelihood_name=likelihood_name,
backend=backend,
proposal_name=proposal_name,
- **kwargs
+ **kwargs,
)
self.beta = 0
@@ -126,10 +120,9 @@ def _sampler_state_blacklist(self):
bl = [
"likelihoods",
"check_bnd",
- "logp_forw",
+ "logp_forw_func",
"bij",
"lij",
- "ordering",
"lordering",
"proposal_samples_array",
"vars",
@@ -199,42 +192,50 @@ def select_end_points(self, mtrace):
Parameters
----------
- mtrace : :class:`pymc3.backend.base.MultiTrace`
+ mtrace : :class:`pymc.backend.base.MultiTrace`
Returns
-------
population : list
- of :func:`pymc3.Point` dictionaries
+ of :func:`pymc.Point` dictionaries
array_population : :class:`numpy.ndarray`
Array of trace end-points
likelihoods : :class:`numpy.ndarray`
Array of likelihoods of the trace end-points
"""
+ q = self.bij.map(self.test_point)
- array_population = np.zeros((self.n_chains, self.ordering.size))
-
+ array_population = np.zeros((self.n_chains, q.data.size))
n_steps = len(mtrace)
# collect end points of each chain and put into array
- for var, slc, shp, _ in self.ordering.vmap:
+ last_idx = 0
+ for var_name, shp, dtype in q.point_map_info:
slc_population = mtrace.get_values(
- varname=var, burn=n_steps - 1, combine=True
+ varname=var_name, burn=n_steps - 1, combine=True
)
-
+ arr_len = np.prod(shp, dtype=int)
+ slc = slice(last_idx, last_idx + arr_len)
if len(shp) == 0:
array_population[:, slc] = np.atleast_2d(slc_population).T
else:
array_population[:, slc] = slc_population
+ last_idx += arr_len
+
# get likelihoods
likelihoods = mtrace.get_values(
varname=self.likelihood_name, burn=n_steps - 1, combine=True
)
- population = []
# map end array_endpoints to dict points
+ population = []
for i in range(self.n_chains):
- population.append(self.bij.rmap(array_population[i, :]))
+ population.append(
+ self.bij.rmap(
+ RaveledVars(array_population[i, :], point_map_info=q.point_map_info)
+ )
+ )
return population, array_population, likelihoods
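The rewritten select_end_points above replaces the pymc3-era ordering.vmap bookkeeping with pymc's RaveledVars / point_map_info. A minimal sketch of that round trip, assuming pymc >= 5 (variable names illustrative)::

    import numpy as np
    from pymc.blocking import DictToArrayBijection, RaveledVars

    point = {"east_shift": np.array([1.0, 2.0]), "depth": np.array([3.5])}

    # ravel the point dict into a flat array plus the metadata needed to undo it
    q = DictToArrayBijection.map(point)
    flat = q.data + 0.1  # e.g. a perturbed chain end point in array space

    # map the flat array back to a {name: value} point
    new_point = DictToArrayBijection.rmap(
        RaveledVars(flat, point_map_info=q.point_map_info)
    )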
@@ -245,7 +246,7 @@ def get_chain_previous_lpoint(self, mtrace):
Parameters
----------
- mtrace : :class:`pymc3.backend.base.MultiTrace`
+ mtrace : :class:`pymc.backend.base.MultiTrace`
Returns
-------
@@ -258,7 +259,6 @@ def get_chain_previous_lpoint(self, mtrace):
n_steps = len(mtrace)
for _, slc, shp, _, var in self.lordering.vmap:
-
slc_population = mtrace.get_values(
varname=var, burn=n_steps - 1, combine=True
)
@@ -376,7 +376,7 @@ def smc_sample(
continue after completed stages (stage should be the number of the
completed stage + 1). If None the start will be at stage = 0.
n_jobs : int
- The number of cores to be used in parallel. Be aware that theano has
+ The number of cores to be used in parallel. Be aware that pytensor has
internal parallelisation. Sometimes this is more efficient especially
for simple models.
step.n_chains / n_jobs has to be an integer number!
@@ -390,7 +390,7 @@ def smc_sample(
buffer_thinning : int
every nth sample of the buffer is written to disk
default: 1 (no thinning)
- model : :class:`pymc3.Model`
+ model : :class:`pymc.Model`
(optional if in `with` context) has to contain deterministic
variable name defined under step.likelihood_name' that contains the
model likelihood
@@ -509,10 +509,7 @@ def smc_sample(
step.beta = 1.0
save_sampler_state(step, update, stage_handler)
- if stage == -1:
- chains = []
- else:
- chains = None
+ chains = stage_handler.clean_directory(-1, chains, rm_flag)
else:
step.covariance = step.calc_covariance()
step.proposal_dist = choose_proposal(
@@ -557,7 +554,7 @@ def save_sampler_state(step, update, stage_handler):
weights = None
outparam_list = [step.get_sampler_state(), weights]
- stage_handler.dump_atmip_params(step.stage, outparam_list)
+ stage_handler.dump_smc_params(step.stage, outparam_list)
def tune(acc_rate):
@@ -578,3 +575,4 @@ def tune(acc_rate):
a = 1.0 / 9
b = 8.0 / 9
return np.power((a + (b * acc_rate)), 2)
diff --git a/beat/sources.py b/beat/sources.py
index 7d4cc2ef..60936afa 100644
--- a/beat/sources.py
+++ b/beat/sources.py
@@ -347,7 +347,6 @@ def extent_source(
@classmethod
def from_kite_source(cls, source, kwargs):
-
d = dict(
lat=source.lat,
lon=source.lon,
@@ -361,7 +360,7 @@ def from_kite_source(cls, source, kwargs):
rake=source.rake,
slip=source.slip,
anchor="top",
- **kwargs
+ **kwargs,
)
if hasattr(source, "decimation_factor"):
@@ -444,7 +443,6 @@ class MTQTSource(gf.SourceWithMagnitude):
)
def __init__(self, **kwargs):
-
self.R = get_rotation_matrix()
self.roty_pi4 = self.R["y"](-pi4)
self.rotx_pi = self.R["x"](pi)
@@ -554,7 +552,7 @@ def discretize_basesource(self, store, target=None):
)
return meta.DiscretizedMTSource(
m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis],
- **self._dparams_base_repeated(times)
+ **self._dparams_base_repeated(times),
)
def pyrocko_moment_tensor(self, store=None, target=None):
@@ -619,7 +617,7 @@ class MTSourceWithMagnitude(gf.SourceWithMagnitude):
def __init__(self, **kwargs):
if "m6" in kwargs:
- for (k, v) in zip("mnn mee mdd mne mnd med".split(), kwargs.pop("m6")):
+ for k, v in zip("mnn mee mdd mne mnd med".split(), kwargs.pop("m6")):
kwargs[k] = float(v)
Source.__init__(self, **kwargs)
@@ -663,7 +661,7 @@ def discretize_basesource(self, store, target=None):
m6s = self.scaled_m6 * m0
return meta.DiscretizedMTSource(
m6s=m6s[num.newaxis, :] * amplitudes[:, num.newaxis],
- **self._dparams_base_repeated(times)
+ **self._dparams_base_repeated(times),
)
def pyrocko_moment_tensor(self):
@@ -675,7 +673,7 @@ def pyrocko_event(self, **kwargs):
self,
moment_tensor=self.pyrocko_moment_tensor(),
magnitude=float(mt.moment_magnitude()),
- **kwargs
+ **kwargs,
)
@classmethod
@@ -691,3 +689,41 @@ def from_pyrocko_event(cls, ev, **kwargs):
def get_derived_parameters(self, point=None, store=None, target=None, event=None):
mt = mtm.MomentTensor.from_values(self.scaled_m6)
return num.hstack(mt.both_strike_dip_rake())
+
+
+source_names = """
+ ExplosionSource
+ RectangularExplosionSource
+ SFSource
+ DCSource
+ CLVDSource
+ MTSource
+ MTQTSource
+ RectangularSource
+ DoubleDCSource
+ RingfaultSource
+ """.split()
+
+
+source_classes = [
+ gf.ExplosionSource,
+ gf.RectangularExplosionSource,
+ gf.SFSource,
+ gf.DCSource,
+ gf.CLVDSource,
+ MTSourceWithMagnitude,
+ MTQTSource,
+ gf.RectangularSource,
+ gf.DoubleDCSource,
+ gf.RingfaultSource,
+]
+
+source_catalog = dict(zip(source_names, source_classes))
+
+stf_names = """
+ Boxcar
+ Triangular
+ HalfSinusoid
+ """.split()
+
+stf_catalog = dict(zip(stf_names, gf.seismosizer.stf_classes[1:4]))
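The new source_catalog and stf_catalog dictionaries map configured source and source-time-function names to their classes. A small usage sketch (parameter values illustrative)::

    from beat.sources import source_catalog, stf_catalog

    source_cls = source_catalog["RectangularSource"]  # -> pyrocko.gf.RectangularSource
    stf_cls = stf_catalog["HalfSinusoid"]

    # instantiate from configured names; parameters not given keep their defaults
    src = source_cls(lat=0.0, lon=0.0, depth=4.0e3, length=8.0e3, width=4.0e3)
    src.stf = stf_cls(duration=2.0)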
diff --git a/beat/utility.py b/beat/utility.py
index 24dd58c2..a9d57f7e 100644
--- a/beat/utility.py
+++ b/beat/utility.py
@@ -11,29 +11,47 @@
import copy
import logging
import os
-import pickle
import re
from functools import wraps
from timeit import Timer
+import cloudpickle as pickle
import numpy as num
from pyrocko import catalog, orthodrome, util
from pyrocko.cake import LayeredModel, m2d, read_nd_model_str
-from pyrocko.gf.seismosizer import RectangularSource
from pyrocko.guts import Float, Int, Object
-from theano import config as tconfig
+from pytensor import config as tconfig
logger = logging.getLogger("utility")
DataMap = collections.namedtuple("DataMap", "list_ind, slc, shp, dtype, name")
-locationtypes = {"east_shift", "north_shift", "depth", "distance", "delta_depth"}
-dimensiontypes = {"length", "width", "diameter"}
+locationtypes = {
+ "east_shift",
+ "north_shift",
+ "depth",
+ "distance",
+ "delta_depth",
+ "delta_east_shift_bottom",
+ "delta_north_shift_bottom",
+ "depth_bottom",
+}
+
+dimensiontypes = {
+ "length",
+ "width",
+ "diameter",
+ "a_half_axis",
+ "b_half_axis",
+ "a_half_axis_bottom",
+ "b_half_axis_bottom",
+}
mttypes = {"mnn", "mee", "mdd", "mne", "mnd", "med"}
degtypes = {"strike", "dip", "rake"}
nucleationtypes = {"nucleation_x", "nucleation_y"}
patch_anchor_points = {"center", "bottom_depth", "bottom_left"}
+patypes = {"traction", "strike_traction", "dip_traction", "tensile_traction"}
kmtypes = set.union(locationtypes, dimensiontypes, patch_anchor_points)
grouped_vars = set.union(kmtypes, mttypes, degtypes, nucleationtypes)
@@ -70,7 +88,6 @@ def __init__(self):
self.d = dict()
def __call__(self, string, multiplier=1):
-
if string not in self.d:
self.d[string] = 0
else:
@@ -95,13 +112,13 @@ def reset(self, string=None):
class ListArrayOrdering(object):
"""
- An ordering for a list to an array space. Takes also non theano.tensors.
- Modified from pymc3 blocking.
+ An ordering for a list to an array space. Takes also non pytensor.tensors.
+ Modified from pymc blocking.
Parameters
----------
list_arrays : list
- :class:`numpy.ndarray` or :class:`theano.tensor.Tensor`
+ :class:`numpy.ndarray` or :class:`pytensor.tensor.Tensor`
intype : str
defining the input type 'tensor' or 'numpy'
"""
@@ -114,7 +131,7 @@ def __init__(self, list_arrays, intype="numpy"):
for array in list_arrays:
if intype == "tensor":
name = array.name
- array = array.tag.test_value
+ array = array.get_test_value()
elif intype == "numpy":
name = "numpy"
@@ -179,7 +196,6 @@ def d2l(self, dpt):
-------
lpoint
"""
-
a_list = copy.copy(self.list_arrays)
for list_ind, _, shp, _, var in self.ordering.vmap:
@@ -202,7 +218,7 @@ def l2d(self, a_list):
Returns
-------
- :class:`pymc3.model.Point`
+ :class:`pymc.model.Point`
"""
point = {}
@@ -316,12 +332,12 @@ def srmap(self, tarray):
Parameters
----------
- tarray : :class:`theano.tensor.Tensor`
+ tarray : :class:`pytensor.tensor.Tensor`
Returns
-------
a_list : list
- of :class:`theano.tensor.Tensor`
+ of :class:`pytensor.tensor.Tensor`
"""
a_list = copy.copy(self.list_arrays)
@@ -340,7 +356,7 @@ def weed_input_rvs(input_rvs, mode, datatype):
Parameters
----------
input_rvs : dict
- of :class:`pymc3.Distribution` or set of variable names
+ of :class:`pymc.Distribution` or set of variable names
mode : str
'geometry', 'static, 'kinematic', 'interseismic' determining the
discarded RVs
@@ -350,7 +366,7 @@ def weed_input_rvs(input_rvs, mode, datatype):
Returns
-------
weeded_input_rvs : dict
- of :class:`pymc3.Distribution`
+ of :class:`pymc.Distribution`
"""
weeded_input_rvs = copy.copy(input_rvs)
@@ -390,12 +406,17 @@ def weed_input_rvs(input_rvs, mode, datatype):
"fd",
] + burian
- elif mode == "interseismic":
+ elif mode == "bem":
if datatype == "geodetic":
- tobeweeded = burian
-
- else:
+ tobeweeded = [
+ "time",
+ "duration",
+ "peak_ratio",
+ ] + burian
+ elif mode == "ffi":
tobeweeded = []
+ else:
+ raise TypeError(f"Mode {mode} not supported!")
for weed in tobeweeded:
if isinstance(weeded_input_rvs, dict):
@@ -606,13 +627,16 @@ def transform_sources(sources, datatypes, decimation_factors=None):
for datatype in datatypes:
transformed_sources = []
- for source in sources:
- transformed_source = copy.deepcopy(source)
+ for idx, source in enumerate(sources):
+ transformed_source = source.clone()
if decimation_factors is not None:
- transformed_source.update(
- decimation_factor=decimation_factors[datatype], anchor="top"
- )
+ try:
+ transformed_source.update(
+ decimation_factor=decimation_factors[datatype], anchor="top"
+ )
+ except KeyError:
+ logger.info("Not setting decimation for source %i" % idx)
if datatype == "geodetic" or datatype == "polarity":
transformed_source.stf = None
@@ -631,25 +655,27 @@ def adjust_point_units(point):
Parameters
----------
point : dict
- :func:`pymc3.model.Point` of model parameter units as keys
+ :func:`pymc.model.Point` of model parameter units as keys
Returns
-------
mpoint : dict
- :func:`pymc3.model.Point`
+ :func:`pymc.model.Point`
"""
mpoint = {}
for key, value in point.items():
if key in kmtypes:
mpoint[key] = value * km
+ elif key in patypes:
+ mpoint[key] = value * km * km
else:
mpoint[key] = value
return mpoint
-def split_point(point):
+def split_point(point, mapping=None, n_sources_total=None, weed_params=False):
"""
Split point in solution space into List of dictionaries with source
parameters for each source.
@@ -657,27 +683,53 @@ def split_point(point):
Parameters
----------
point : dict
- :func:`pymc3.model.Point`
+ :func:`pymc.model.Point`
+    mapping : :class:`beat.config.DatatypeParameterMapping`
+ n_sources_total : int
+        total number of sources over all source types in the setup
+ weed_params: bool
+        if True, only source related parameters are kept in the point;
+        if False, leftover non-source parameters may raise an error.
Returns
-------
source_points : list
- of :func:`pymc3.model.Point`
+ of :func:`pymc.model.Point`
"""
- params = point.keys()
- if len(params) > 0:
- n_sources = point[next(iter(params))].shape[0]
- else:
- n_sources = 0
- source_points = []
- for i in range(n_sources):
- source_param_dict = dict()
- for param, value in point.items():
- source_param_dict[param] = float(value[i])
+    if mapping is not None and n_sources_total is not None:
+        raise ValueError("Must provide either mapping or n_sources_total, not both!")
+
+ if mapping is None and n_sources_total is None:
+ raise ValueError("Must provide either mapping or n_sources_total")
- source_points.append(source_param_dict)
+ if mapping is not None:
+ point_to_sources = mapping.point_to_sources_mapping()
+ n_sources_total = mapping.n_sources
+ else:
+ point_to_sources = None
+
+ if weed_params:
+ source_parameter_names = mapping.point_variable_names()
+ for param in list(point.keys()):
+ if param not in source_parameter_names:
+ point.pop(param)
+
+ source_points = [{} for i in range(n_sources_total)]
+ for param, values in point.items():
+ if point_to_sources:
+ source_idxs = point_to_sources[param]
+ else:
+ source_idxs = range(n_sources_total)
+ for value, idx in zip(values, source_idxs):
+ try:
+ source_points[idx][param] = float(value)
+ except IndexError:
+ raise IndexError(
+ "Tried to set index %i for parameter %s, but does not exist."
+ % (idx, param)
+ )
return source_points
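With the signature change above, callers of split_point now state explicitly how a point is distributed over sources. A minimal sketch of the n_sources_total path (values illustrative)::

    from beat.utility import split_point

    # one entry per parameter, one value per source
    point = {"east_shift": [0.0, 5.0], "depth": [2.0, 6.0], "magnitude": [5.5, 6.1]}

    source_points = split_point(point, n_sources_total=2)
    # -> [{'east_shift': 0.0, 'depth': 2.0, 'magnitude': 5.5},
    #     {'east_shift': 5.0, 'depth': 6.0, 'magnitude': 6.1}]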
@@ -727,10 +779,10 @@ def update_source(source, **point):
----------
source : :class:`pyrocko.gf.seismosizer.Source`
point : dict
- :func:`pymc3.model.Point`
+ :func:`pymc.model.Point`
"""
- for (k, v) in point.items():
+ for k, v in point.items():
if k not in source.keys():
if source.stf is not None:
try:
@@ -1126,7 +1178,7 @@ def slice2string(slice_obj):
return slice_obj
-def list2string(l, fill=", "):
+def list2string(any_list, fill=", "):
"""
Convert list of string to single string.
@@ -1135,7 +1187,7 @@ def list2string(l, fill=", "):
l: list
of strings
"""
- return fill.join("%s" % slice2string(listentry) for listentry in l)
+ return fill.join("%s" % slice2string(listentry) for listentry in any_list)
def string2slice(slice_string):
@@ -1151,7 +1203,7 @@ def string2slice(slice_string):
return slice(*[int(idx) for idx in slice_string.split(":")])
-def unique_list(l):
+def unique_list(any_list):
"""
Find unique entries in list and return them in a list.
Keeps variable order.
@@ -1165,7 +1217,7 @@ def unique_list(l):
list with only unique elements
"""
used = []
- return [x for x in l if x not in used and (used.append(x) or True)]
+ return [x for x in any_list if x not in used and (used.append(x) or True)]
def join_models(global_model, crustal_model):
@@ -1193,7 +1245,7 @@ def join_models(global_model, crustal_model):
return joined_model
-def split_off_list(l, off_length):
+def split_off_list(any_list, off_length):
"""
Split a list with length 'off_length' from the beginning of an input
list l.
@@ -1211,7 +1263,7 @@ def split_off_list(l, off_length):
list
"""
- return [l.pop(0) for i in range(off_length)]
+ return [any_list.pop(0) for i in range(off_length)]
def mod_i(i, cycle):
@@ -1257,12 +1309,12 @@ def biggest_common_divisor(a, b):
return int(a)
-def gather(l, key, sort=None, filter=None):
+def gather(any_list, key, sort=None, filter=None):
"""
Return dictionary of input l grouped by key.
"""
d = {}
- for x in l:
+ for x in any_list:
if filter is not None and not filter(x):
continue
@@ -1507,7 +1559,6 @@ def positions2idxs(positions, cell_size, min_pos=0.0, backend=num, dtype="int16"
def rotate_coords_plane_normal(coords, sf):
-
coords -= sf.bottom_left / km
rots = get_rotation_matrix()
@@ -1605,12 +1656,10 @@ def find_elbow(data, theta=None, rotate_left=False):
class StencilOperator(Object):
-
h = Float.T(default=0.1, help="step size left and right of the reference value")
order = Int.T(default=3, help="number of points of central differences")
def __init__(self, **kwargs):
-
stencil_order = kwargs["order"]
if stencil_order not in [3, 5]:
raise ValueError(
diff --git a/beat/voronoi/voronoi.py b/beat/voronoi/voronoi.py
index 56ff8750..cbeade2a 100644
--- a/beat/voronoi/voronoi.py
+++ b/beat/voronoi/voronoi.py
@@ -1,5 +1,4 @@
import numpy as num
-
import voronoi_ext
@@ -33,7 +32,6 @@ def get_voronoi_cell_indexes_c(
def get_voronoi_cell_indexes_numpy(
gf_points_dip, gf_points_strike, voronoi_points_dip, voronoi_points_strike
):
-
n_voros = voronoi_points_dip.size
n_gfs = gf_points_dip.size
diff --git a/data/examples/Fernandina/config_geometry.yaml b/data/examples/Fernandina/config_geometry.yaml
index 12df33c2..77edc993 100644
--- a/data/examples/Fernandina/config_geometry.yaml
+++ b/data/examples/Fernandina/config_geometry.yaml
@@ -10,11 +10,11 @@ event: !pf.Event
project_dir: /home/vasyurhm/BEATS/Fernandina
problem_config: !beat.ProblemConfig
mode: geometry
- source_type: RectangularSource
+ source_types: [RectangularSource]
stf_type: HalfSinusoid
decimation_factors:
geodetic: 2
- n_sources: 1
+ n_sources: [1]
datatypes: [geodetic]
hyperparameters:
h_SAR: !beat.heart.Parameter
diff --git a/data/examples/FullMT/config_geometry.yaml b/data/examples/FullMT/config_geometry.yaml
index c76de5aa..30158481 100644
--- a/data/examples/FullMT/config_geometry.yaml
+++ b/data/examples/FullMT/config_geometry.yaml
@@ -30,9 +30,9 @@ event: !pf.Event
project_dir: /home/vasyurhm/BEATS/FullMT
problem_config: !beat.ProblemConfig
mode: geometry
- source_type: MTSource
+ source_types: [MTSource]
stf_type: Triangular
- n_sources: 1
+ n_sources: [1]
datatypes: [seismic]
hyperparameters:
h_any_P_0_Z: !beat.heart.Parameter
diff --git a/data/examples/Laquila/config_geometry.yaml b/data/examples/Laquila/config_geometry.yaml
index d634cbdc..f1834c60 100644
--- a/data/examples/Laquila/config_geometry.yaml
+++ b/data/examples/Laquila/config_geometry.yaml
@@ -29,12 +29,12 @@ event: !pf.Event
project_dir: /home/vasyurhm/BEATS/LaquilaJointPonlyUPDATE
problem_config: !beat.ProblemConfig
mode: geometry
- source_type: RectangularSource
+ source_types: [RectangularSource]
stf_type: HalfSinusoid
decimation_factors:
geodetic: 4
seismic: 1
- n_sources: 1
+ n_sources: [1]
datatypes: [geodetic, seismic]
hyperparameters:
h_SAR: !beat.heart.Parameter
diff --git a/data/examples/MTQT_polarity/config_geometry.yaml b/data/examples/MTQT_polarity/config_geometry.yaml
index 9ae36d50..8554bae1 100644
--- a/data/examples/MTQT_polarity/config_geometry.yaml
+++ b/data/examples/MTQT_polarity/config_geometry.yaml
@@ -30,9 +30,9 @@ event: !pf.Event
project_dir: /home/vasyurhm/BEATS/Polarity_tutorial
problem_config: !beat.ProblemConfig
mode: geometry
- source_type: MTQTSource
+ source_types: [MTQTSource]
stf_type: Triangular
- n_sources: 1
+ n_sources: [1]
datatypes:
- polarity
hyperparameters:
diff --git a/data/examples/dc_teleseismic/config_geometry.yaml b/data/examples/dc_teleseismic/config_geometry.yaml
index 7c381b00..a0b4a514 100644
--- a/data/examples/dc_teleseismic/config_geometry.yaml
+++ b/data/examples/dc_teleseismic/config_geometry.yaml
@@ -30,9 +30,9 @@ event: !pf.Event
project_dir: /home/vasyurhm/BEATS/LaquilaJointPonlyUPDATE_DC_var
problem_config: !beat.ProblemConfig
mode: geometry
- source_type: DCSource
+ source_types: [DCSource]
stf_type: HalfSinusoid
- n_sources: 1
+ n_sources: [1]
datatypes: [seismic]
hyperparameters:
h_any_P_0_Z: !beat.heart.Parameter
diff --git a/docs/anaconda_installation.rst b/docs/anaconda_installation.rst
index f943fb71..e9934317 100644
--- a/docs/anaconda_installation.rst
+++ b/docs/anaconda_installation.rst
@@ -10,59 +10,55 @@ A general advice when dealing with anaconda is that the "sudo" command must NOT
instead of the respective anaconda environment.
Below are a series of commands that might be able to get you up and running using anaconda3 (thanks to Rebecca Salvage).
-Create and activate a new conda environment e.g. called "beat" using python3.8 (minimum required is 3.8)::
+Create and activate a new conda environment e.g. called "beatenv" using python3.11 (minimum required is 3.9)::
- conda create -n beat python=3.8
- conda activate beat
+ conda create -n beatenv python=3.11
+ conda activate beatenv
cd ~/src # or wherever you keep the packages
-Download the beat source package from github (requires git to be installed on your machine)::
-
- git clone https://github.com/hvasbath/beat
-
Download and install several required packages::
- conda install -n beat libgfortran openblas theano pygpu openmpi pandas numpy openmpi
+ conda install -n beatenv libgfortran openblas pytensor numpy
-Install mpi4py through conda-forge::
+Install pymc and pyrocko packages::
- conda install -c conda-forge mpi4py
+ conda install -n beatenv -c conda-forge pymc
+ conda install -n beatenv -c pyrocko pyrocko
-Configure theano to find your libraries by creating a file ".theanorc" in your home directory containing::
+Once all the requirements are installed, we install *BEAT* with::
- [blas]
- ldflags = -L/path/to/your/anaconda/environments/beat/lib -lopenblas -lgfortran
+ pip install beat
- [nvcc]
- fastmath = True
+For a quick check that beat is running, call its help::
- [global]
- device = cpu
- floatX = float64
+ beat init --help
-For testing if numpy and theano installations worked fine::
- cd ~/src/beat
- python3 test/numpy_test.py
- THEANO_FLAGS=mode=FAST_RUN,device=cpu,floatX=float32 python3 test/gpu_test.py
+Optional: Install MPI for the PT sampler
+----------------------------------------
+Install openmpi and mpi4py through conda-forge::
-Install pymc3 and pyrocko packages::
+    conda install -n beatenv -c conda-forge openmpi
+ conda install -c conda-forge mpi4py
- conda install -n beat -c conda-forge pymc3=3.4.1
- conda install -n beat -c pyrocko pyrocko
-Once all the requirements are installed we install BEAT with::
+Optional: Install pygmsh and cutde for the BEM module
+-----------------------------------------------------
+Some optional dependencies are required in order to use the Boundary Element Method (BEM) module.
+For meshing, *BEAT* uses the gmsh library and its python wrapper pygmsh::
- cd ~/src/beat
- pip3 install .
+ [sudo] apt install gmsh
+ pip install pygmsh
-Then for a fast check if beat is running one can start it calling the help::
+To calculate synthetic surface displacements for triangular dislocations::
- beat init --help
+ conda install -c conda-forge cutde
+
+Install and configure your GPU for *cutde* following this `page `__.
-Greens Function calculations
-----------------------------
+Optional: Greens Function calculations
+--------------------------------------
To calculate the Greens Functions we rely on modeling codes written by
`Rongjiang Wang `__.
If you plan to use the GreensFunction calculation framework,
@@ -72,7 +68,7 @@ The original codes are packaged for windows and can be found
For Unix systems the codes had to be repackaged.
-The packages below are also github repositories and you may want to use "git clone" to download:
+The packages below are also github repositories and you may want to use "git clone" to download::
git clone
diff --git a/docs/api.rst b/docs/api.rst
index 89185e32..e1bea2b5 100644
--- a/docs/api.rst
+++ b/docs/api.rst
@@ -91,14 +91,6 @@ The :mod:`models` Module
.. automodule:: models.geodetic
:members:
-
-The :mod:`interseismic` Module
-------------------------------
-
-.. automodule:: interseismic
- :members:
-
-
The :mod:`covariance` Module
----------------------------
@@ -106,10 +98,10 @@ The :mod:`covariance` Module
:members:
-The :mod:`theanof` Module
--------------------------
+The :mod:`pytensorf` Module
+---------------------------
-.. automodule:: theanof
+.. automodule:: pytensorf
:members:
diff --git a/docs/conf.py b/docs/conf.py
index b46416f2..f80db3d2 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -24,7 +24,7 @@
sys.path.insert(0, os.path.abspath("../beat"))
sys.setrecursionlimit(1500)
-import beat
+import beat # noqa
# -- General configuration ------------------------------------------------
@@ -43,7 +43,7 @@
# 'sphinx.ext.mathjax',
"sphinx.ext.viewcode",
"sphinx.ext.doctest",
- "sphinx.ext.napoleon"
+ "sphinx.ext.napoleon",
# 'numpydoc'
]
@@ -367,9 +367,9 @@
intersphinx_mapping = {
"python": ("https://docs.python.org/", None),
"numpy": ("http://docs.scipy.org/doc/numpy/", None),
- "pymc3": ("https://docs.pymc.io/", None),
+ "pymc": ("https://www.pymc.io/", None),
"pyrocko": ("https://pyrocko.org/", None),
- "theano": ("https://theano-pymc.readthedocs.io/", None),
+ "pytensor": ("https://pytensor.readthedocs.io/", None),
}
@@ -387,8 +387,6 @@ def process_signature(app, what, name, obj, options, signature, return_annotatio
def skip_member(app, what, name, obj, skip, options):
- from pyrocko import guts
-
if what == "class" and name == "dummy_for":
return True
if what == "class" and name == "T":
diff --git a/docs/examples/FFI_static.rst b/docs/examples/FFI_static.rst
index 890e9341..cfe59928 100644
--- a/docs/examples/FFI_static.rst
+++ b/docs/examples/FFI_static.rst
@@ -123,12 +123,12 @@ Under the *problem_config* we find the parameters that we need to adjust::
regularization: none
npatches: 121
initialization: random
- source_type: RectangularSource
+ source_types: [RectangularSource]
stf_type: HalfSinusoid
decimation_factors:
geodetic: 1
seismic: 1
- n_sources: 1
+ n_sources: [1]
datatypes: [geodetic, seismic]
hyperparameters:
h_SAR: !beat.heart.Parameter
diff --git a/docs/examples/MTQT_polarity.rst b/docs/examples/MTQT_polarity.rst
index 708685fc..d4145e3a 100644
--- a/docs/examples/MTQT_polarity.rst
+++ b/docs/examples/MTQT_polarity.rst
@@ -200,9 +200,9 @@ This is defined in the *problem_config* (source specification)::
problem_config: !beat.ProblemConfig
mode: geometry
- source_type: MTQTSource
+ source_types: [MTQTSource]
stf_type: Triangular
- n_sources: 1
+ n_sources: [1]
datatypes:
- polarity
diff --git a/docs/examples/dc_teleseismic.rst b/docs/examples/dc_teleseismic.rst
index 19a9035b..1e0dda7c 100644
--- a/docs/examples/dc_teleseismic.rst
+++ b/docs/examples/dc_teleseismic.rst
@@ -9,7 +9,7 @@ To copy the example (including the data) to a directory outside of the package s
please edit the 'model path' (referred to as $beat_models now on) to your preference path and execute::
cd /path/to/beat/data/examples/
- beat clone dc_teleseismic /'model path'/Laquila_dc --copy_data --datatypes=seismic --source_type=DCSource --sampler=PT
+ beat clone dc_teleseismic /'model path'/Laquila_dc --copy_data --datatypes=seismic --source_types=DCSource --sampler=PT
This will create a BEAT project directory named 'Laquila_dc' with a configuration file (config_geometry.yaml) and
real example data (seismic_data.pkl). This directory is going to be referred to as '$project_directory' in the following.
diff --git a/docs/faq.rst b/docs/faq.rst
index 7a6319ed..c5f13332 100644
--- a/docs/faq.rst
+++ b/docs/faq.rst
@@ -9,9 +9,9 @@ The variations in the likelihood space are too large. When normalising the space
Please reduce the potential search interval for hyper parameters "h_*", i.e. reduce and/or increase the upper and/or lower bounds in the
project config_*.yaml, respectively.
-| **2. Theano MKL support**
+| **2. Pytensor MKL support**
-RuntimeError: To use MKL 2018 with Theano you MUST set "MKL_THREADING_LAYER=GNU" in your environment.
+RuntimeError: To use MKL 2018 with Pytensor you MUST set "MKL_THREADING_LAYER=GNU" in your environment.
| add
| export MKL_THREADING_LAYER=GNU
@@ -20,9 +20,9 @@ RuntimeError: To use MKL 2018 with Theano you MUST set "MKL_THREADING_LAYER=GNU"
| **3. Slow compilation**
 No error will be thrown, but during "beat sample" the compilation of the forward model function may take a long time.
-In such a case the default compilation flags of theano may be overwritten. This may result in longer runtime.::
+In such a case the default compilation flags of pytensor may be overridden. This may result in longer runtime::
- THEANO_FLAGS=optimizer=fast_compile beat sample Projectpath
+ PYTENSOR_FLAGS=optimizer=fast_compile beat sample Projectpath
| **4. MPI rank always 0**
diff --git a/docs/getting_started/ffi_setup.rst b/docs/getting_started/ffi_setup.rst
index 5f299213..51088f78 100644
--- a/docs/getting_started/ffi_setup.rst
+++ b/docs/getting_started/ffi_setup.rst
@@ -10,14 +10,14 @@ Optimizing for the rupture nucleation point makes the problem non-linear.
 The finite fault inference in beat is considered to be a follow-up step of the geometry estimation for a RectangularSource, which is why a new project directory to solve for the geometry of a RectangularSource has to be created first. If the reader has already set up such a problem and finished the sampling for the geometry, the next command can be skipped::
- beat init FFIproject --datatypes='seismic' --source_type='RectangularSource' --n_sources=1
+ beat init FFIproject --datatypes='seismic' --source_types='RectangularSource' --n_sources=1
If an estimation for the geometry of another source has been done or setup (e.g. MTSource), one can clone this project folder and replace the source object. This saves
time for specification of the inputs. How to setup the configurations for a "geometry" estimation is discussed
`here `__ exemplary on a MomentTensor for regional seismic data.
-The "source_type" argument will replace any existing source with the specified source for the new project. With the next project we replace the old source with a RectangularSource.::
+The "source_types" argument will replace any existing sources with the specified sources in the new project. With the next command we replace the old source with a RectangularSource::
- beat clone MTproject FFIproject --datatypes='seismic' --source_type='RectangularSource' --copy_data
+ beat clone MTproject FFIproject --datatypes='seismic' --source_types='RectangularSource' --copy_data
Now the Green's Functions store(s) have to be calculated for the "geometry" problem if not done so yet. Instructions on this and what to keep in mind are given `here `__. For illustration, the user might have done a MomentTensor estimation already on teleseismic data using Green's Functions depth and distance sampling of 1km with 1Hz sampling. This may be accurate enough for this type of problem, however for a finite fault inference the aim is to resolve details of the rupture propagation and the slip distribution. So the setup parameters of the "geometry" Green's Functions would need to be changed to higher resolution. A depth and distance sampling of 250m and 4Hz sample rate might be precise enough, if waveforms up to 1Hz are to be used in the sampling. Of course, these parameters depend on the problem setup and have to be adjusted individually for each problem!
diff --git a/docs/getting_started/general.rst b/docs/getting_started/general.rst
index 6b26daa2..7718d292 100644
--- a/docs/getting_started/general.rst
+++ b/docs/getting_started/general.rst
@@ -24,14 +24,17 @@ Will display::
seismic".
--mode=MODE Inversion problem to solve; "geometry", "ffi",
"interseismic" Default: "geometry"
- --source_type=SOURCE_TYPE
- Source type to solve for; ExplosionSource",
- "RectangularExplosionSource", "DCSource",
- "CLVDSource", "MTSource", "RectangularSource",
- "DoubleDCSource", "RingfaultSource. Default:
- "RectangularSource"
+ --source_types=SOURCE_TYPES
+ List of source types to solve for. Can be any combination of
+ the following for mode: geometry - ExplosionSource,
+ RectangularExplosionSource, SFSource, DCSource,
+ CLVDSource, MTSource, MTQTSource, RectangularSource,
+ DoubleDCSource, RingfaultSource; bem - DiskBEMSource,
+ RingfaultBEMSource; Default: 'RectangularSource'
--n_sources=N_SOURCES
- Integer Number of sources to invert for. Default: 1
+ List of integer numbers of sources per source type to
+ invert for. Default: [1]
+
--waveforms=WAVEFORMS
Waveforms to include in the setup; "any_P, any_S,
slowest".
diff --git a/docs/getting_started/init_project.rst b/docs/getting_started/init_project.rst
index 497b76ab..8f4acb42 100644
--- a/docs/getting_started/init_project.rst
+++ b/docs/getting_started/init_project.rst
@@ -5,7 +5,7 @@ Each modeling project is initiated with the "beat init" command. There are many
For example to optimize for a Full Moment Tensor for the Landers EQ by using seismic data, with station dependent Greens Functions for P and S waves with the default sampling algorithm (Sequential Monte Carlo) run::
- beat init LandersEQ 1992-06-28 --datatypes='seismic' --individual_gfs --n_sources=1 --source_type=MTSource --min_mag=7
+ beat init LandersEQ 1992-06-28 --datatypes='seismic' --individual_gfs --n_sources=1 --source_types=MTSource --min_mag=7
This will create project directory called LandersEQ in the current directory.
Within the directory you will see that there have been two files created:
@@ -63,9 +63,9 @@ This example configuration file looks like this::
project_dir: /home/vasyurhm/BEATS/LandersEQ
problem_config: !beat.ProblemConfig
mode: geometry
- source_type: MTSource
+ source_types: [MTSource]
stf_type: HalfSinusoid
- n_sources: 1
+ n_sources: [1]
datatypes: [seismic]
hyperparameters:
h_any_P_Z: !beat.heart.Parameter
@@ -252,7 +252,7 @@ Initialize modeling project of an unlisted earthquake
To create a customizable moment tensor project for an earthquake not included in any moment tensor
catalog, run::
- beat init newEQ --datatypes='seismic' --mode='geometry' --source_type='MTSource' --waveforms='any_P, any_S, slowest' --use_custom
+ beat init newEQ --datatypes='seismic' --mode='geometry' --source_types='MTSource' --waveforms='any_P, any_S, slowest' --use_custom
This creates the folder “newEQ” with a *config_geometry.yaml* file inside. Some parameters should be
manually edited and filled up. These are some suggested initial changes in the configuration file:
diff --git a/docs/installation.rst b/docs/installation.rst
index ce27cfff..1abedb37 100644
--- a/docs/installation.rst
+++ b/docs/installation.rst
@@ -4,7 +4,9 @@
Detailed Installation instructions
**********************************
-BEAT can be installed on any Unix based system with python>=3.8
+This section covers the detailed installation of numerical libraries to speed up the performance of numpy and pytensor.
+
+BEAT can be installed on any Unix based system with python>=3.9
that supports its prerequisites.
@@ -25,7 +27,7 @@ optimizations of your numerics libraries.
Then we will need a fortran compiler and the python developers library::
- sudo apt-get install git python3-dev gfortran
+ [sudo] apt-get install git python3-dev gfortran
BEAT does many intensive calculations, which is why we need to get as much as
possible out of the available libraries in terms of computational efficiency.
@@ -36,13 +38,15 @@ Although, this process is somewhat tedious and not straight forward for
everybody, it is really worth doing so! If you have done a similar optimization
to your machine's libraries already, you can start at the Main Packages section.
-For everyone else I summarized the relevant points below.
+For everyone else I summarized the relevant points below. BEAT v2 runs on *pytensor*, the successor of *theano*.
+Thus, the link to the *theano* configuration is kept below, since it might still be helpful.
For all the heavy details I refer to these links:
`Numpy configure `__
`Theano configure `__
+`Pytensor configure `__
OpenBlas
""""""""
@@ -50,10 +54,10 @@ If the OpenBlas library is compiled locally, it is optimized for your machine
and speeds up the calculations::
cd ~/src # or where you keep your source files
- git clone https://github.com/xianyi/OpenBLAS
+ git clone https://github.com/OpenMathLib/OpenBLAS
cd OpenBLAS
make FC=gfortran
- sudo make PREFIX=/usr/local install
+ [sudo] make PREFIX=/usr/local install
Now we have to tell the system where to find the new OpenBLAS library.
In the directory /etc/ld.so.conf.d/ should be a file `libc.conf` containing
@@ -84,7 +88,7 @@ Numpy
This following step is completely optional and one may decide to use a standard pip numpy package.
Building numpy from source requires cython::
- pip3 install cython
+ pip install cython
If you compile numpy locally against the previously installed OpenBlas
library you can gain significant speedup. For my machine it resulted
@@ -140,35 +144,36 @@ Depending on your hardware something around these numbers should be fine!::
Eigendecomp of (1500,1500) matrix in 36.625 s
-Theano
-""""""
-Theano is a package that was originally designed for deep learning and enables
+pytensor
+""""""""
+Pytensor is a package that was originally designed for deep learning and enables you
to compile the python code into GPU cuda code or CPU C++. Therefore, you can
decide to use the GPU of your computer rather than the CPU, without needing to
reimplement all the codes. Using the GPU is very much useful, if many heavy
matrix multiplications have to be done, which is the case for some of the BEAT
 models (static and kinematic optimizers). Thus, it is worth spending the time
-to configure your theano to efficiently use your GPU. Even if you dont plan to
+to configure pytensor to use your GPU efficiently. Even if you don't plan to
 use your GPU, these instructions will help boost your CPU performance as
well.
For the bleeding edge installation do::
cd ~/src
- git clone https://github.com/Theano/Theano
- cd Theano
- python3 setup.py install
+ git clone https://github.com/pymc-devs/pytensor/
+ cd pytensor
+ pip3 install .
For any troubleshooting and detailed installation instructions I refer to the
-`Theano `__ webpage.
+`pytensor `__ webpage.
CPU setup
#########
Optional: Setup for libamdm
___________________________
+Might be DEPRECATED! I haven't had the chance to test this again. If you do, please let me know!
Only for 64-bit machines!
-This again speeds up the elementary operations! Theano will for sure work
+This again speeds up the elementary operations! Pytensor will for sure work
without including this, but the performance increase (below)
will convince you to do so ;) .
@@ -192,7 +197,7 @@ $ROOT=/usr/local/ ::
General
_______
-In your home directory create a file `.theanorc`.
+In your home directory create a file `.pytensorrc`.
The file has to be edited depending on the type of processing unit that is
intended to be used. Set amdlibm = True if you did the optional step! ::
@@ -210,50 +215,6 @@ intended to be used. Set amdlibm = True if you did the optional step! ::
amdlibm = False # if applicable set True here
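+
+To verify that the settings are picked up, the active configuration can be
+inspected from python (a quick, optional sanity check)::
+
+    import pytensor
+
+    print(pytensor.config.floatX)         # e.g. 'float64' if set in the file above
+    print(pytensor.config.blas__ldflags)  # should point at your OpenBLAS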
-GPU setup DEPRECATED
-####################
-Only for Theano version < 0.9.
-For NVIDIA graphics cards there is the CUDA package that needs to be installed.::
-
- sudo apt-get install nvidia-current
- sudo apt-get install nvdidia-cuda-toolkit
-
-Restart the system.
-To check if the installation worked well type::
-
- nvidia-smi
-
-This should display stats about your graphics card model.
-
-Now we have to tell theano where to find the cuda package.
-For doing so we have to add the library folder to the $LD_LIBRARY_PATH and the
-CUDA root direct to the $PATH.
-
-In bash you can do it like this, e.g. (depending on the path to your cuda
-installation) add to your .bashrc file in the home directory::
-
- export CUDA_LIB="/usr/local/cuda-5.5/lib64"
- export CUDA_ROOT="/usr/local/cuda-5.5/bin"
-
- export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:$CUDA_LIB
- export PATH=${PATH}:$CUDA_ROOT
-
-Theano also supports OpenCL, however, I haven't set it up myself so far and
-cannot provide instructions on how to do it.
-
-In your home directory create a file `.theanorc` with these settings::
-
- [blas]
- ldflags = -L/usr/local/lib -lopenblas -lgfortran
-
- [nvcc]
- fastmath = True
-
- [global]
- device = gpu
- floatX = float32
-
-
Check performance
#################
@@ -264,7 +225,7 @@ as intended::
Using the CPU (amdlibm = False)::
- THEANO_FLAGS=mode=FAST_RUN,device=cpu,floatX=float32 python3 test/gpu_test.py
+    PYTENSOR_FLAGS=mode=FAST_RUN,device=cpu,floatX=float32 python3 test/gpu_test.py
[Elemwise{exp,no_inplace}()]
Looping 1000 times took 2.717895 seconds
@@ -274,7 +235,7 @@ Using the CPU (amdlibm = False)::
Using the CPU (amdlibm = True)::
- THEANO_FLAGS=mode=FAST_RUN,device=cpu,floatX=float32 python3 test/gpu_test.py
+    PYTENSOR_FLAGS=mode=FAST_RUN,device=cpu,floatX=float32 python3 test/gpu_test.py
[Elemwise{exp,no_inplace}()]
Looping 1000 times took 0.703979 seconds
@@ -287,7 +248,7 @@ That's a speedup of 3.86! On the ELEMENTARY operations like exp(), log(), cos()
Using the GPU::
- THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python3 src/test/gpu_test.py
+    PYTENSOR_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python3 src/test/gpu_test.py
Using gpu device 0: Quadro 5000 (CNMeM is disabled, cuDNN not available)
[GpuElemwise{exp,no_inplace}(),
@@ -298,90 +259,3 @@ Using the GPU::
Used the gpu
Congratulations, you are done with the numerics installations!
-
-
-Main Packages
--------------
-
-BEAT relies on 2 main libraries. Detailed installation instructions for each
-can be found on the respective websites:
-
- - `pymc3 `__
- - `pyrocko `__
-
-
-pymc3
-"""""
-Pymc3 is a framework that provides various optimization algorithms allows and
-allows to build Bayesian models. BEAT relies on an older version of pymc3- work into upgrading it::
-
- pip3 install pymc3==3.4.1
-
-
-Pyrocko
-"""""""
-Pyrocko is an extensive library for seismological applications and provides a
-framework to efficiently store and access Greens Functions.::
-
- cd ~/src
- git clone git://github.com/pyrocko/pyrocko.git pyrocko
- cd pyrocko
- pip3 install .
-
-OpenMPI
-"""""""
-For the Parallel Tempering algorithm OpenMPI and the python
-bindings are required. If you do not have any MPI library installed, this needs to be installed first.
-For now BEAT only supports MPI versions <3. Available mpi versions can be listed with the command::
-
- apt-cache madison libopenmpi-dev
-
-To install openmpi for a specific version for example version 2.1.1-8::
-
- sudo apt install openmpi-bin=2.1.1-8 libopenmpi-dev=2.1.1-8 -V
-
-Finally, the python wrapper::
-
- sudo pip3 install mpi4py
-
-
-BEAT
-""""
-After these long and heavy installations, you can setup BEAT itself::
-
- cd ~/src/beat
- pip3 install .
-
-Greens Functions
-----------------
-
-To calculate the Greens Functions we rely on modeling codes written by
-`Rongjiang Wang `__.
-If you plan to use the GreensFunction calculation framework,
-these codes are required and need to be compiled manually.
-The original codes are packaged for windows and can be found
-`here `__.
-
-For Unix systems the codes had to be repackaged.
-
-The packages below are also github repositories and you may want to use "git clone" to download:
-
- git clone
-
-For example to clone the github repository for QSEIS please execute::
-
- git clone https://github.com/pyrocko/fomosto-qseis
-
-This also enables easy updating for potential future changes.
-
-For configuration and compilation please follow the descriptions provided in each repository respectively.
-
-Seismic synthetics
-
-* `QSEIS `__
-* `QSSP `__
-
-
-Geodetic synthetics
-
-* `PSGRN_PSCMP `__
diff --git a/docs/short_installation.rst b/docs/short_installation.rst
index 725c7085..86ede78d 100644
--- a/docs/short_installation.rst
+++ b/docs/short_installation.rst
@@ -4,7 +4,8 @@
Short Installation instructions
*******************************
-BEAT can be installed on any Unix based system with python==3.8 that supports its prerequisites.
+Starting with v2.0.0, BEAT can be installed on any Unix based system with python>=3.9 that supports its prerequisites.
+Earlier versions must be installed with python3.8!
Please consider to use `virtual environments `__ to lower the risk of package conflicts.
@@ -21,9 +22,9 @@ Create a directory *virtualenvs* where you want to keep your virtual environment
mkdir virtualenvs
cd virtualenvs
-Create new environment e.g. *beat_env* and activate it::
+Create a new environment, e.g. *beat_env*, using python3.11 (for another version just change the number, e.g. python3.9) and activate it::
- python3 -m venv beat_env
+ python3.11 -m venv beat_env
source ~/virtualenvs/beat_env/bin/activate
The environment can be (later) deactivated NOT NOW!, with::
@@ -66,6 +67,49 @@ Once the development headers are installed. Only switching between gitbranches-
git checkout $branch_name
+Optional Dependencies
+---------------------
+For using the BEM module
+""""""""""""""""""""""""
+The new Boundary Element Modeling (BEM) module requires extra dependencies (and their own dependencies)::
+
+ pygmsh
+ cutde
+
+To install *pygmsh*::
+
+ [sudo] apt install python3-gmsh # this will be a system wide installation of gmsh
+ pip install pygmsh # this will install the python abstraction library around gmsh
+
+To install *cutde*::
+
+ pip install cutde
+
+This will be sufficient to run *cutde* on the CPU using its C++ backend. However, that will render sampling slow
+to the point that it is not useful. In order to use the BEM module of BEAT for sampling, a GPU is required.
+Instructions for installing GPU support for *cutde*, depending on your system architecture, are `here `__.
+
+For using the PT sampler
+""""""""""""""""""""""""
+For the Parallel Tempering (PT) algorithm OpenMPI and the python
+bindings are required. If you do not have any MPI library installed, this needs to be installed first::
+
+ [sudo] apt install openmpi-bin libopenmpi-dev
+
+Finally, the python wrapper::
+
+ pip3 install mpi4py
+
+
+If a particular mpi version is required, the available versions can be listed with the command::
+
+ apt-cache madison libopenmpi-dev
+
+To install a specific openmpi version, for example 2.1.1-8::
+
+ [sudo] apt install openmpi-bin=2.1.1-8 libopenmpi-dev=2.1.1-8 -V
+
+
Greens Function calculations
----------------------------
diff --git a/extras/beat b/extras/beat
index 2fd970c6..78aa084e 100644
--- a/extras/beat
+++ b/extras/beat
@@ -19,19 +19,22 @@ _beat_options()
cur=${COMP_WORDS[COMP_CWORD]}
- _avail_plots="velocity_models stage_posteriors correlation_hist slip_distribution slip_distribution_3d moment_rate station_map waveform_fits scene_fits gnss_fits fuzzy_beachball fuzzy_mt_decomp hudson lune station_variance_reductions"
+ _avail_plots="""
+ velocity_models stage_posteriors correlation_hist slip_distribution slip_distribution_3d moment_rate
+ station_map station_variance_reductions waveform_fits scene_fits gnss_fits fuzzy_beachball fuzzy_mt_decomp
+        hudson lune"""
_std="--mode --help -h --loglevel --main_path"
declare -A arg_subsub
- arg_subsub["init"]="--min_mag --datatypes --source_type --n_sources --sampler --hyper_sampler --use_custom --individual_gfs $_std"
+ arg_subsub["init"]="--min_mag --datatypes --source_types --n_sources --sampler --hyper_sampler --use_custom --individual_gfs $_std"
arg_subsub["build_gfs"]="--datatypes --plot --force --execute $_std"
arg_subsub["import"]="--results --datatypes --geodetic_format --seismic_format --force --import_from_mode $_std"
arg_subsub["update"]="--parameters --diff $_std"
arg_subsub["sample"]="--hypers $_std"
arg_subsub["summarize"]="--stage_number --calc_derived --force $_std"
arg_subsub["export"]="--stage_number --reference --fix_output --force $_std"
- arg_subsub["clone"]="--datatypes --source_type --sampler --copy_data $_std"
+ arg_subsub["clone"]="--datatypes --source_types --n_sources --sampler --copy_data $_std"
arg_subsub["plot"]="--post_llk --stage_number --varnames --format --dpi --force --nensemble --reference --source_idxs --hypers --build --plot_projection $_std"
arg_subsub["check"]="--datatypes --what --targets $_std"
diff --git a/pyproject.toml b/pyproject.toml
index 07d44288..c69e6504 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,8 +4,8 @@ build-backend = "setuptools.build_meta"
[project]
name = "beat"
-version = "1.2.5"
-requires-python = ">=3.8"
+version = "2.0.0"
+requires-python = ">=3.9"
license = {text = "GPLv3"}
description = "'Bayesian Earthquake Analysis Tool'"
readme = "README.md"
@@ -19,20 +19,20 @@ keywords = ["Bayesian Inference", "seismology", "geodesy", "earthquake", "volcan
classifiers = [
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Physics",
- "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
"Programming Language :: C",
"Operating System :: POSIX",
"Operating System :: MacOS"
]
dependencies = [
- "numpy==1.22.4",
- "scipy>=1.8.0",
- "pymc3==3.4.1",
+ "numpy",
+ "scipy",
+ "pytensor>=2.18.4",
+ "pymc>=5.10.0",
"tqdm>=4.64.0",
"matplotlib>=3.1.1",
"psutil>=5.9.0",
- "mpi4py>=3.1.3",
- "pyrocko>=2022.06.10"
+ "pyrocko>=2023.10.11"
]
[project.urls]
@@ -41,7 +41,7 @@ GitHub = "https://github.com/hvasbath/beat"
Issues = "https://github.com/hvasbath/beat/issues"
[project.optional-dependencies]
-formatting = ["flake8", "black"]
+formatting = ["flake8", "ruff"]
[project.scripts]
beat = "beat.apps.beat:main"
diff --git a/setup.py b/setup.py
index c0b0145e..68545de8 100644
--- a/setup.py
+++ b/setup.py
@@ -6,12 +6,11 @@
from distutils.sysconfig import get_python_inc
from setuptools import Extension, setup
-from setuptools.command.build_py import build_py
op = os.path
packname = "beat"
-version = "1.2.5"
+version = "2.0.0"
try:
@@ -35,7 +34,6 @@ class NotInAGitRepos(Exception):
def git_infos():
-
from subprocess import PIPE, run
"""Query git about sha1 of last commit and check if there are local \
diff --git a/test/gpu_test.py b/test/gpu_test.py
index 18cef42e..1c9e2add 100644
--- a/test/gpu_test.py
+++ b/test/gpu_test.py
@@ -1,15 +1,15 @@
import time
import numpy
-import theano.tensor as T
-from theano import config, function, sandbox, shared
+import pytensor.tensor as tt
+from pytensor import config, function, shared
vlen = 10 * 30 * 768 # 10 x #cores x # threads per core
iters = 1000
rng = numpy.random.RandomState(22)
x = shared(numpy.asarray(rng.rand(vlen), config.floatX))
-f = function([], T.exp(x))
+f = function([], tt.exp(x))
print(f.maker.fgraph.toposort())
t0 = time.time()
for i in range(iters):
@@ -17,7 +17,9 @@
t1 = time.time()
print("Looping %d times took %f seconds" % (iters, t1 - t0))
print("Result is %s" % (r,))
-if numpy.any([isinstance(x.op, T.Elemwise) for x in f.maker.fgraph.toposort()]):
+if numpy.any(
+ [isinstance(x.op, tt.elemwise.Elemwise) for x in f.maker.fgraph.toposort()]
+):
print("Used the cpu")
else:
print("Used the gpu")
diff --git a/test/pt_toy_example.py b/test/pt_toy_example.py
index 88d626f8..ccb998f2 100644
--- a/test/pt_toy_example.py
+++ b/test/pt_toy_example.py
@@ -27,7 +27,6 @@ def metrop_select(m1, m2):
def master_process(comm, size, tags, status):
-
num_workers = size - 1
tasks = range(num_workers)
chain = []
diff --git a/test/test_backend.py b/test/test_backend.py
index f07648a0..eea2318e 100644
--- a/test/test_backend.py
+++ b/test/test_backend.py
@@ -86,7 +86,6 @@ def two_gaussians(x):
self.expected_chain_data[data_key] = num.array(data)
def test_text_chain(self):
-
textchain = TextChain(dir_path=self.test_dir_path, model=self.PT_test)
textchain.setup(10, 0, overwrite=True)
@@ -111,7 +110,6 @@ def test_text_chain(self):
)
def test_chain_bin(self):
-
numpy_chain = NumpyChain(dir_path=self.test_dir_path, model=self.PT_test)
numpy_chain.setup(10, 0, overwrite=True)
print(numpy_chain)
diff --git a/test/test_bem.py b/test/test_bem.py
new file mode 100644
index 00000000..c4fad737
--- /dev/null
+++ b/test/test_bem.py
@@ -0,0 +1,320 @@
+import logging
+import unittest
+
+import numpy as num
+from matplotlib import pyplot as plt
+
+# from numpy.testing import assert_allclose
+from pyrocko import util
+from pyrocko.gf.targets import StaticTarget
+
+from beat.bem import (
+ BEMEngine,
+ CurvedBEMSource,
+ DiskBEMSource,
+ RectangularBEMSource,
+ RingfaultBEMSource,
+ TriangleBEMSource,
+ check_intersection,
+)
+from beat.config import BEMConfig
+from beat.plotting.bem import slip_distribution_3d
+
+km = 1.0e3
+pi = num.pi
+logger = logging.getLogger("test_sources")
+
+mesh_size = 1.0
+
+
+def get_static_target(bounds, npoints=100):
+ xs = num.linspace(*bounds, npoints)
+ ys = num.linspace(*bounds, npoints)
+ obsx, obsy = num.meshgrid(xs, ys)
+ return StaticTarget(east_shifts=obsx.ravel(), north_shifts=obsy.ravel())
+
+
+def plot_static_result(result, target, npoints=100):
+ fig, axs = plt.subplots(1, 3, figsize=(17, 5), dpi=300)
+ for i, comp in enumerate(["n", "e", "d"]):
+ ax = axs[i]
+ disp_grid = result[f"displacement.{comp}"].reshape((npoints, npoints))
+ if comp == "d":
+ disp_grid *= -1
+ grd_e = target.east_shifts.reshape((npoints, npoints))
+ grd_n = target.north_shifts.reshape((npoints, npoints))
+ cntf = ax.contourf(grd_e, grd_n, disp_grid, levels=21)
+ ax.contour(
+ grd_e,
+ grd_n,
+ disp_grid,
+ colors="k",
+ linestyles="-",
+ linewidths=0.5,
+ levels=21,
+ )
+ # print("displ min max", disp_grid.min(), disp_grid.max())
+ ax.set_title(f"$u_{comp}$")
+
+ cb = plt.colorbar(cntf)
+ cb.set_label("Displacement [m]")
+ fig.tight_layout()
+ return fig, axs
+
+
+def get_triangle_tensile_only_setup():
+ targets = [get_static_target([-10 * km, 10 * km], 100)]
+ sources = [
+ TriangleBEMSource(
+ normal_traction=2.15e6,
+ p1=(km, km, -2 * km),
+ p2=(-km, -0.5 * km, -2 * km),
+ p3=(1 * km, -1.5 * km, -2 * km),
+ )
+ ]
+
+ config = BEMConfig(mesh_size=mesh_size)
+ for bcond in config.boundary_conditions.iter_conditions():
+ if bcond.slip_component in ["strike", "dip"]:
+ bcond.source_idxs = []
+ return config, sources, targets
+
+
+def get_disk_setup():
+ targets = [get_static_target([-10 * km, 10 * km], 100)]
+ sources = [
+ DiskBEMSource(
+ traction=1.15e6,
+ normal_traction=0,
+ rake=-45,
+ north_shift=0.5 * km,
+ depth=3.5 * km,
+ a_half_axis=3 * km,
+ b_half_axis=1.8 * km,
+ dip=45,
+ strike=210,
+ )
+ ]
+ return BEMConfig(mesh_size=mesh_size), sources, targets
+
+
+def get_disk_tensile_only_setup():
+ targets = [get_static_target([-10 * km, 10 * km], 100)]
+ sources = [
+ DiskBEMSource(
+ normal_traction=2.15e6,
+ north_shift=0.5 * km,
+ depth=3.5 * km,
+ a_half_axis=1 * km,
+ b_half_axis=1.0 * km,
+ dip=0,
+ strike=30,
+ )
+ ]
+ config = BEMConfig(mesh_size=mesh_size)
+ for bcond in config.boundary_conditions.iter_conditions():
+ if bcond.slip_component in ["strike", "dip"]:
+ bcond.source_idxs = []
+ return config, sources, targets
+
+
+def get_disk_ringfault_setup(intersect=False):
+ targets = [get_static_target([-10 * km, 10 * km], 100)]
+
+ if intersect:
+ a_half_axis_bottom = 2.5 * km
+ b_half_axis_bottom = 2.5 * km
+ depth = 3.0 * km
+ else:
+ a_half_axis_bottom = 3.5 * km
+ b_half_axis_bottom = 3.5 * km
+ depth = 4.2 * km
+
+ sources = [
+ DiskBEMSource(
+ normal_traction=2.15e6,
+ north_shift=0.0 * km,
+ east_shift=3.5 * km,
+ depth=depth,
+ a_half_axis=a_half_axis_bottom,
+ b_half_axis=b_half_axis_bottom,
+ dip=0,
+ strike=0,
+ ),
+ RingfaultBEMSource(
+ north_shift=0.0,
+ east_shift=3.5 * km,
+ delta_north_shift_bottom=0.0 * km,
+ depth=0.5 * km,
+ depth_bottom=3.9 * km,
+ a_half_axis=2 * km,
+ b_half_axis=2 * km,
+ a_half_axis_bottom=a_half_axis_bottom,
+ b_half_axis_bottom=b_half_axis_bottom,
+ strike=5,
+ ),
+ ]
+
+ config = BEMConfig(mesh_size=1.5)
+ for bcond in config.boundary_conditions.iter_conditions():
+ bcond.receiver_idxs = [0, 1]
+ if bcond.slip_component in ["strike", "dip"]:
+ bcond.source_idxs = [1]
+
+ return config, sources, targets
+
+
+def get_rectangular_setup_strikeslip():
+ targets = [get_static_target([-10 * km, 10 * km], 100)]
+ sources = [
+ RectangularBEMSource(
+ traction=1.15e6,
+ rake=0,
+ north_shift=0.5 * km,
+ depth=3.5 * km,
+ length=10 * km,
+ width=5 * km,
+ dip=75,
+ strike=20,
+ )
+ ]
+ config = BEMConfig(mesh_size=mesh_size)
+ for bcond in config.boundary_conditions.iter_conditions():
+ if bcond.slip_component in ["normal"]:
+ bcond.source_idxs = []
+
+ return config, sources, targets
+
+
+def get_rectangular_setup_dipslip():
+ # mesh_size = 1. * km
+ targets = [get_static_target([-10 * km, 10 * km], 100)]
+ sources = [
+ RectangularBEMSource(
+ traction=1.15e6,
+ rake=90,
+ north_shift=0.5 * km,
+ depth=3.5 * km,
+ length=10 * km,
+ width=5 * km,
+ dip=30,
+ strike=0,
+ )
+ ]
+ config = BEMConfig(mesh_size=mesh_size)
+ for bcond in config.boundary_conditions.iter_conditions():
+ if bcond.slip_component in ["normal"]:
+ bcond.source_idxs = []
+
+ return config, sources, targets
+
+
+def get_rectangular_setup_opening():
+ # mesh_size = 1. * km
+ targets = [get_static_target([-10 * km, 10 * km], 100)]
+ sources = [
+ RectangularBEMSource(
+ normal_traction=1.15e6,
+ rake=90,
+ north_shift=0.5 * km,
+ depth=3.5 * km,
+ length=10 * km,
+ width=5 * km,
+ dip=30,
+ strike=0,
+ )
+ ]
+ config = BEMConfig(mesh_size=mesh_size)
+ for bcond in config.boundary_conditions.iter_conditions():
+ if bcond.slip_component in ["dip", "strike"]:
+ bcond.source_idxs = []
+
+ return config, sources, targets
+
+
+def get_curved_setup_dipslip():
+ targets = [get_static_target([-10 * km, 10 * km], 100)]
+ sources = [
+ CurvedBEMSource(
+ traction=1.15e6,
+ rake=90,
+ north_shift=0.5 * km,
+ depth=3.5 * km,
+ length=15 * km,
+ width=7 * km,
+ dip=30,
+ strike=310,
+ bend_location=0.5,
+ bend_amplitude=0.3,
+ curv_location_bottom=0.0,
+ curv_amplitude_bottom=0.0,
+ )
+ ]
+ config = BEMConfig(mesh_size=mesh_size)
+ for bcond in config.boundary_conditions.iter_conditions():
+ if bcond.slip_component in ["normal"]:
+ bcond.source_idxs = []
+
+ return config, sources, targets
+
+
+class TestBEM(unittest.TestCase):
+ def __init__(self, *args, **kwargs):
+ unittest.TestCase.__init__(self, *args, **kwargs)
+
+ def _run_bem_engine(self, setup_function, plot=True, **kwargs):
+ config, sources, targets = setup_function(**kwargs)
+
+ engine = BEMEngine(config)
+ response = engine.process(sources=sources, targets=targets)
+
+ results = response.static_results()
+ if plot and response.is_valid:
+ for i, result in enumerate(results):
+ plot_static_result(result.result, targets[i])
+
+ slip_vectors = response.source_slips()
+
+ slip_distribution_3d(response.discretized_sources, slip_vectors, debug=True)
+ plt.show()
+
+ def test_bem_engine_tensile_only_triangle(self):
+ self._run_bem_engine(get_triangle_tensile_only_setup)
+
+ def test_bem_engine_tensile_only_dike(self):
+ self._run_bem_engine(get_disk_tensile_only_setup)
+
+ def test_bem_engine_dike(self):
+ self._run_bem_engine(get_disk_setup)
+
+ def test_bem_engine_rectangle(self):
+ self._run_bem_engine(get_rectangular_setup_strikeslip)
+ self._run_bem_engine(get_rectangular_setup_dipslip)
+ self._run_bem_engine(get_rectangular_setup_opening)
+
+ def test_bem_engine_curved(self):
+ # self._run_bem_engine(get_quadrangular_setup_strikeslip)
+ self._run_bem_engine(get_curved_setup_dipslip)
+
+ def test_bem_engine_dike_ringfault(self):
+ kwargs = {"intersect": True}
+ self._run_bem_engine(get_disk_ringfault_setup, **kwargs)
+
+ kwargs = {"intersect": False}
+ self._run_bem_engine(get_disk_ringfault_setup, **kwargs)
+
+ def test_bem_source_intersection(self):
+ config, sources, _ = get_disk_ringfault_setup(intersect=True)
+
+ intersect = check_intersection(sources, mesh_size=config.mesh_size * km)
+ assert intersect is True
+
+ config, sources, _ = get_disk_ringfault_setup(intersect=False)
+
+ intersect = check_intersection(sources, mesh_size=config.mesh_size * km)
+ assert intersect is False
+
+
+if __name__ == "__main__":
+ util.setup_logging("test_bem", "info")
+ unittest.main()
diff --git a/test/test_composites.py b/test/test_composites.py
index c33f8fe0..010e1f00 100644
--- a/test/test_composites.py
+++ b/test/test_composites.py
@@ -1,15 +1,12 @@
import logging
import os
-import shutil
import unittest
from copy import deepcopy
-from tempfile import mkdtemp
import numpy as num
-import theano.tensor as tt
from numpy.testing import assert_allclose
-from pyrocko import orthodrome, plot, trace, util
-from theano import function, shared
+from pyrocko import util
+from pytensor import function
from beat import models
diff --git a/test/test_config.py b/test/test_config.py
new file mode 100644
index 00000000..27d58e27
--- /dev/null
+++ b/test/test_config.py
@@ -0,0 +1,68 @@
+import logging
+import unittest
+from time import time
+
+import numpy as num
+from pyrocko import util
+
+from beat.config import SourcesParameterMapping
+from beat.utility import split_point
+
+logger = logging.getLogger("test_config")
+
+
+class TestConfig(unittest.TestCase):
+ def __init__(self, *args, **kwargs):
+ unittest.TestCase.__init__(self, *args, **kwargs)
+
+ def test_parameter_source_mapping(self):
+ mapping = SourcesParameterMapping(datatypes=["geodetic", "seismic"])
+
+ sources_variables_one = {
+ "east_shift": 1,
+ "a_half_axis": 1,
+ }
+
+ sources_variables_two = {
+ "east_shift": 2,
+ "depth_bottom": 2,
+ }
+
+ mapping.add(
+ datatype="geodetic",
+ sources_variables=[sources_variables_one, sources_variables_two],
+ )
+
+ sources_variables_one["duration"] = 1
+
+ mapping.add(
+ datatype="seismic",
+ sources_variables=[sources_variables_one, sources_variables_two],
+ )
+
+ vars_sizes = mapping.unique_variables_sizes()
+ point = {varname: num.arange(size) for varname, size in vars_sizes.items()}
+
+ t0 = time()
+ spoint = split_point(point, mapping=mapping["geodetic"], n_sources_total=3)
+ t1 = time()
+
+ assert len(spoint) == 3
+ assert "depth_bottom" not in spoint[0].keys()
+ assert "depth_bottom" in spoint[1].keys()
+ assert "depth_bottom" in spoint[2].keys()
+
+ for point in spoint:
+ assert "east_shift" in point.keys()
+ assert "duration" not in point.keys()
+
+ point = {varname: num.arange(size) for varname, size in vars_sizes.items()}
+ spoint = split_point(point, mapping=mapping["seismic"], n_sources_total=3)
+
+ assert "duration" in spoint[0].keys()
+ print(spoint, t1 - t0)
+
+
+if __name__ == "__main__":
+ util.setup_logging("test_config", "info")
+ unittest.main()
diff --git a/test/test_covariance.py b/test/test_covariance.py
index daded0b7..49561f73 100644
--- a/test/test_covariance.py
+++ b/test/test_covariance.py
@@ -1,17 +1,16 @@
import logging
import unittest
-from time import time
import numpy as num
from matplotlib import pyplot as plt
from pyrocko import util
+from pytest import mark
from beat.covariance import non_toeplitz_covariance, non_toeplitz_covariance_2d
from beat.heart import Covariance
from beat.models import load_model
from beat.utility import get_random_uniform
-
num.random.seed(10)
logger = logging.getLogger("test_covariance")
@@ -22,7 +21,6 @@ def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
def test_non_toeplitz(self):
-
ws = 500
a = num.random.normal(scale=2, size=ws)
cov = non_toeplitz_covariance(a, window_size=int(ws / 5))
@@ -36,7 +34,6 @@ def test_non_toeplitz(self):
plt.show()
def test_non_toeplitz_2d(self):
-
ws = 500
data = num.random.normal(scale=2, size=ws)
coords_x = get_random_uniform(-10000, 10000, dimension=ws)
@@ -52,10 +49,10 @@ def test_non_toeplitz_2d(self):
plt.colorbar(im)
plt.show()
+ @mark.skip(reason="requires dependent data")
def test_non_toeplitz_2d_data(self):
-
- from beat.utility import load_objects
from beat import config
+ from beat.utility import load_objects
home = "/home/vasyurhm/BEATS/PeaceRiver/Alberta2022joint/"
data = load_objects(home + "geodetic_data.pkl")
@@ -72,7 +69,6 @@ def test_non_toeplitz_2d_data(self):
plt.show()
def test_covariance_chol_inverse(self):
-
n = 10
a = num.random.rand(n**2).reshape(n, n)
C_d = a.T.dot(a) + num.eye(n) * 0.3
@@ -80,29 +76,30 @@ def test_covariance_chol_inverse(self):
cov = Covariance(data=C_d)
chol_ur = cov.chol_inverse
inverse_from_chol_qr = chol_ur.T.dot(chol_ur)
+ inverse_cov = cov.inverse()
if 1:
from matplotlib import pyplot as plt
fig, axs = plt.subplots(3, 2)
axs[0, 0].imshow(inverse_from_chol_qr)
- axs[0, 0].set_title("Inverse from QR cholesky")
- axs[0, 1].imshow(cov.inverse)
+ axs[0, 0].set_title("Inverse of QR cholesky")
+ axs[0, 1].imshow(inverse_cov)
axs[0, 1].set_title("Inverse from matrix inversion")
- I_diff = inverse_from_chol_qr - cov.inverse
- print(cov.inverse)
+ I_diff = inverse_from_chol_qr - inverse_cov
+ print(inverse_cov)
print("Idiff minmax", I_diff.min(), I_diff.max())
axs[1, 0].imshow(I_diff)
axs[1, 0].set_title("Difference")
# plt.colorbar(im2)
- I_div = num.log(num.abs(inverse_from_chol_qr / cov.inverse))
+ I_div = num.log(num.abs(inverse_from_chol_qr / inverse_cov))
print("minmax", I_div.min(), I_div.max())
axs[1, 1].imshow(I_div)
axs[1, 1].set_title("Ratio")
- axs[2, 0].imshow(cov.chol)
+ axs[2, 0].imshow(cov.chol())
axs[2, 0].set_title("Cholesky factor of cov")
axs[2, 1].imshow(cov.chol_inverse)
@@ -111,9 +108,10 @@ def test_covariance_chol_inverse(self):
plt.show()
num.testing.assert_allclose(
- inverse_from_chol_qr, cov.inverse, rtol=0.0, atol=1e-6
+ inverse_from_chol_qr, inverse_cov, rtol=0.0, atol=1e-6
)
+ @mark.skip(reason="requires dependent data")
def test_linear_velmod_covariance(self):
print("Warning!: Needs specific project_directory!")
project_dir = "/home/vasyurhm/BEATS/LaquilaJointPonlyUPDATE_wide_cov"
@@ -125,7 +123,7 @@ def test_linear_velmod_covariance(self):
fig, axs = plt.subplots(2, 2)
for i, ds in enumerate(gc.datasets):
im1 = axs[i, 1].matshow(ds.covariance.data)
- im2 = axs[i, 0].matshow(ds.covariance.pred_v)
+ _ = axs[i, 0].matshow(ds.covariance.pred_v)
print("predv mena", ds.covariance.pred_v.mean())
print("data mena", ds.covariance.data.mean())
diff --git a/test/test_distributed.py b/test/test_distributed.py
index c7756f96..6445f850 100644
--- a/test/test_distributed.py
+++ b/test/test_distributed.py
@@ -16,7 +16,6 @@ def __init__(self, *args, **kwargs):
self.beatpath = project_root
def test_mpi_runner(self):
-
logger.info("testing")
runner = MPIRunner()
runner.run(self.beatpath + "/test/pt_toy_example.py", n_jobs=self.n_jobs)
@@ -29,6 +28,5 @@ def test_arg_passing(self):
if __name__ == "__main__":
-
util.setup_logging("test_distributed", "info")
unittest.main()
diff --git a/test/test_fastsweep.py b/test/test_fastsweep.py
index f8f0df88..b76317db 100644
--- a/test/test_fastsweep.py
+++ b/test/test_fastsweep.py
@@ -3,11 +3,11 @@
from time import time
import numpy as num
-import theano.tensor as tt
+import pytensor.tensor as tt
from pyrocko import util
-from theano import function
+from pytensor import function
-from beat import theanof
+from beat import pytensorf
from beat.fast_sweeping import fast_sweep
km = 1000.0
@@ -31,7 +31,6 @@ def get_slownesses(self):
return 1.0 / velocities
def _numpy_implementation(self):
-
slownesses = self.get_slownesses()
t0 = time()
@@ -43,14 +42,13 @@ def _numpy_implementation(self):
self.nuc_x,
self.nuc_y,
)
- print("np", numpy_start_times)
+ # print("np", numpy_start_times)
t1 = time()
logger.info("done numpy fast_sweeping in %f" % (t1 - t0))
return numpy_start_times
- def _theano_implementation(self):
-
+ def _pytensor_implementation(self):
Slownesses = self.get_slownesses()
slownesses = tt.dmatrix("slownesses")
@@ -64,22 +62,21 @@ def _theano_implementation(self):
patch_size = tt.cast(self.patch_size / km, "float64")
- theano_start_times = fast_sweep.get_rupture_times_theano(
+ pytensor_start_times = fast_sweep.get_rupture_times_pytensor(
slownesses, patch_size, nuc_x, nuc_y
)
t0 = time()
- f = function([slownesses, nuc_x, nuc_y], theano_start_times)
+ f = function([slownesses, nuc_x, nuc_y], pytensor_start_times)
t1 = time()
- theano_start_times = f(Slownesses, self.nuc_x, self.nuc_y)
+ pytensor_start_times = f(Slownesses, self.nuc_x, self.nuc_y)
t2 = time()
- logger.info("Theano compile time %f" % (t1 - t0))
- logger.info("done Theano fast_sweeping in %f" % (t2 - t1))
- return theano_start_times
-
- def _theano_c_wrapper(self):
+ logger.info("pytensor compile time %f" % (t1 - t0))
+ logger.info("done pytensor fast_sweeping in %f" % (t2 - t1))
+ return pytensor_start_times
+ def _pytensor_c_wrapper(self):
Slownesses = self.get_slownesses()
slownesses = tt.dvector("slownesses")
@@ -91,7 +88,7 @@ def _theano_c_wrapper(self):
nuc_y = tt.lscalar("nuc_y")
nuc_y.tag.test_value = self.nuc_y
- cleanup = theanof.Sweeper(
+ cleanup = pytensorf.Sweeper(
self.patch_size / km, self.n_patch_dip, self.n_patch_strike, "c"
)
@@ -100,13 +97,13 @@ def _theano_c_wrapper(self):
t0 = time()
f = function([slownesses, nuc_y, nuc_x], start_times)
t1 = time()
- theano_c_wrap_start_times = f(Slownesses.flatten(), self.nuc_y, self.nuc_x)
- print("tc", theano_c_wrap_start_times)
+ pytensor_c_wrap_start_times = f(Slownesses.flatten(), self.nuc_y, self.nuc_x)
+ # print("tc", pytensor_c_wrap_start_times)
t2 = time()
- logger.info("Theano C wrapper compile time %f" % (t1 - t0))
- logger.info("done theano C wrapper fast_sweeping in %f" % (t2 - t1))
- print("Theano C wrapper compile time %f" % (t1 - t0))
- return theano_c_wrap_start_times
+ logger.info("pytensor C wrapper compile time %f", (t1 - t0))
+ logger.info("done pytensor C wrapper fast_sweeping in %f", (t2 - t1))
+ logger.info("pytensor C wrapper compile time %f", (t1 - t0))
+ return pytensor_c_wrap_start_times
def _c_implementation(self):
slownesses = self.get_slownesses()
@@ -121,15 +118,15 @@ def _c_implementation(self):
self.nuc_y,
)
t1 = time()
- print("c", c_start_times)
+ # print("c", c_start_times)
logger.info("done c fast_sweeping in %f" % (t1 - t0))
return c_start_times
def test_differences(self):
np_i = self._numpy_implementation().flatten()
- t_i = self._theano_implementation().flatten()
+ t_i = self._pytensor_implementation().flatten()
c_i = self._c_implementation()
- tc_i = self._theano_c_wrapper()
+ tc_i = self._pytensor_c_wrapper()
num.testing.assert_allclose(np_i, t_i, rtol=0.0, atol=1e-6)
num.testing.assert_allclose(np_i, c_i, rtol=0.0, atol=1e-6)
diff --git a/test/test_ffi.py b/test/test_ffi.py
index 5c2776e9..31d655fe 100644
--- a/test/test_ffi.py
+++ b/test/test_ffi.py
@@ -3,13 +3,15 @@
from time import time
import numpy as num
-import theano.tensor as tt
+import pytensor.tensor as tt
from pyrocko import model, util
-from theano import config as tconfig
-from theano import function
+from pyrocko.gf import RectangularSource
+from pytensor import config as tconfig
+from pytensor import function
from beat import ffi
-from beat.heart import DynamicTarget, WaveformMapping
+from beat.config import SeismicGFLibraryConfig, WaveformFitConfig
+from beat.heart import DynamicTarget
from beat.utility import get_random_uniform
km = 1000.0
@@ -21,11 +23,13 @@ class FFITest(unittest.TestCase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
+ var = "uparr"
nsamples = 10
ntargets = 30
npatches = 40
- sample_rate = 2.0
+ # sample_rate = 2.0
+ self.event = model.Event(lat=10.0, lon=10.0, depth=2.0 * km)
self.times = get_random_uniform(300.0, 500.0, ntargets)
self.starttime_min = 0.0
@@ -45,37 +49,44 @@ def __init__(self, *args, **kwargs):
durations = num.linspace(self.duration_min, self.duration_max, self.ndurations)
starttimes = num.linspace(self.starttime_min, self.starttime_max, nstarttimes)
- lats = num.random.randint(low=-90, high=90, size=ntargets)
- lons = num.random.randint(low=-180, high=180, size=ntargets)
-
- stations = [model.Station(lat=lat, lon=lon) for lat, lon in zip(lats, lons)]
+ # lats = num.random.randint(low=-90, high=90, size=ntargets)
+ # lons = num.random.randint(low=-180, high=180, size=ntargets)
+ # stations = [model.Station(lat=lat, lon=lon) for lat, lon in zip(lats, lons)]
targets = [DynamicTarget(store_id="Test_em_2.000_0") for i in range(ntargets)]
- wavemap = WaveformMapping(name="any_P", stations=stations, targets=targets)
+ wave_config = WaveformFitConfig()
+ # wavemap = WaveformMapping(name="any_P", stations=stations, targets=targets)
+
+ gfl_config = SeismicGFLibraryConfig(
+ component=var,
+ datatype="seismic",
+ event=self.event,
+ reference_sources=[RectangularSource.from_pyrocko_event(self.event)],
+ duration_sampling=duration_sampling,
+ starttime_sampling=starttime_sampling,
+ wave_config=wave_config,
+ dimensions=(ntargets, npatches, self.ndurations, nstarttimes, nsamples),
+ starttime_min=float(starttimes.min()),
+ duration_min=float(durations.min()),
+ mapnumber=1,
+ )
- # TODO needs updating
- # self.gfs = ffi.SeismicGFLibrary(
- # wavemap=wavemap, component='uperp',
- # duration_sampling=duration_sampling,
- # starttime_sampling=starttime_sampling,
- # starttime_min=self.starttime_min,
- # duration_min=self.duration_min)
- # self.gfs.setup(
- # ntargets, npatches, self.ndurations, nstarttimes,
- # nsamples, allocate=True)
+ self.gfs = ffi.SeismicGFLibrary(config=gfl_config)
+ self.gfs.setup(
+ ntargets, npatches, self.ndurations, nstarttimes, nsamples, allocate=True
+ )
+ print(self.gfs)
tracedata = num.tile(num.arange(nsamples), nstarttimes).reshape(
(nstarttimes, nsamples)
)
- # for i, target in enumerate(targets):
- # for patchidx in range(npatches):
- # for duration in durations:
- # tmin = self.times[i]
- # self.gfs.put(
- # tracedata * i, tmin, target, patchidx, duration,
- # starttimes)
+ for i, target in enumerate(targets):
+ for patchidx in range(npatches):
+ tmin = self.times[i]
+ self.gfs.set_patch_time(targetidx=i, tmin=tmin)
+ self.gfs.put(tracedata * i, i, patchidx, durations, starttimes)
def test_gf_setup(self):
print(self.gfs)
@@ -164,7 +175,6 @@ def theano_for_loop(gfs, durationidxs, starttimeidxs, slips):
num.testing.assert_allclose(outnum, outtheanofor, rtol=0.0, atol=1e-6)
def test_snuffle(self):
-
self.gfs.get_traces(
targets=self.gfs.wavemap.targets[0:2],
patchidxs=[0],
diff --git a/test/test_ffi_gfstacking.py b/test/test_ffi_gfstacking.py
index 56b5e02e..35fc03e3 100644
--- a/test/test_ffi_gfstacking.py
+++ b/test/test_ffi_gfstacking.py
@@ -1,264 +1,224 @@
-import os
+from os.path import join as pjoin
+from pathlib import Path
import numpy as num
-from pyrocko import gf, model
-from pyrocko import moment_tensor as mt
-from pyrocko import orthodrome as otd
-from pyrocko import trace, util
+from pyrocko import gf, trace, util
+from pytensor import config as tconfig
+from pytest import mark
-from beat import config, ffi, heart, inputf, models, utility
-from beat.sources import RectangularSource
+from beat import ffi, heart, models
+
+tconfig.compute_test_value = "off"
-km = 1000.0
util.setup_logging("test_ffi_stacking", "info")
# set random seed for reproducible station locations
num.random.seed(10)
+km = 1000.0
nuc_dip = 5.0
nuc_strike = 2.0
time_shift = -10.0 # from previous inversion
-# general
-project_dir = "/home/vasyurhm/BEATS/LaquilaJointPonlyUPDATE_wide_kin3"
-store_superdirs = ["/home/vasyurhm/GF/Laquila"]
-white_noise_perc_max = 0.025 # White noise to disturb the synthetic data, in percent to the maximum amplitude [Hallo et al. 2016 use 0.01]
+project_dir = Path("/home/vasyurhm/BEATS/LaquilaJointPonlyUPDATE_wide_kin3_v2")
-problem = models.load_model(project_dir, mode="ffi", build=False)
-event = problem.config.event
-components = ["uparr"] # , 'uperp']
+def array_to_traces(synthetics, reference_times, deltat, targets, location_tag=None):
+ synth_traces = []
+ for i, target in enumerate(targets):
+ tr = trace.Trace(ydata=synthetics[i, :], tmin=reference_times[i], deltat=deltat)
-starttime_sampling = 0.5
+ tr.set_codes(*target.codes)
+ if location_tag is not None:
+ tr.set_location(location_tag)
-arrival_taper = heart.ArrivalTaper(a=-15.0, b=-10.0, c=50.0, d=55.0)
+ synth_traces.append(tr)
-sc = problem.composites["seismic"]
-fault = sc.load_fault_geometry()
+ return synth_traces
-# get number of patches in dip and strike direction
-npdip, npstrike = fault.ordering.get_subfault_discretization(0)
-# do fast sweeping to get rupture onset times for patches with respect to hypocenter
-velocities = num.ones((npdip, npstrike)) * 3.5
+def get_max_relative_and_absolute_errors(a, b):
+ abs_err = num.abs(a - b).max()
+ rel_err = num.abs((a - b) / b).max()
+ print("absolute", abs_err)
+ print("relative", rel_err)
+ return abs_err, rel_err
-nuc_dip_idx, nuc_strike_idx = fault.fault_locations2idxs(
- 0, nuc_dip, nuc_strike, backend="numpy"
-)
-starttimes = (
- fault.get_subfault_starttimes(0, velocities, nuc_dip_idx, nuc_strike_idx).ravel()
- + time_shift
-)
+def assert_traces(ref_traces, test_traces):
+ assert len(ref_traces) == len(test_traces)
-print(starttimes)
+ for ref_trace, test_trace in zip(ref_traces, test_traces):
+ num.testing.assert_allclose(
+ ref_trace.ydata, test_trace.ydata, rtol=5e-6, atol=5e-6
+ )
+ num.testing.assert_allclose(
+ ref_trace.tmin, test_trace.tmin, rtol=1e-3, atol=1e-3
+ )
-# defining distributed slip values for slip parallel and perpendicular directions
-uparr = num.ones((npdip, npstrike)) * 2.0
-# uparr[1:3, 3:7] = 1.5
-uperp = num.zeros((npdip, npstrike))
-# uperp[0,0] = 1.
-# uperp[3,9] = 1.
-uperp[1:3, 3:7] = 1.0
-# define rupture durations on each patch
-durations = num.ones((npdip, npstrike)) * 0.5
+@mark.skipif(project_dir.is_dir() is False, reason="Needs project dir")
+def test_gf_stacking():
+ # general
+ store_superdirs = ["/home/vasyurhm/GF/Laquila"]
-slips = {
- components[0]: uparr.ravel(),
- # components[1]: uperp.ravel(),
- "durations": durations.ravel(),
- "velocities": velocities.ravel(),
-}
+ problem = models.load_model(project_dir, mode="ffi", build=False)
+ event = problem.config.event
-print("fault parameters", slips)
+ components = ["uparr"] # , 'uperp']
-# update patches with distributed slip and STF values
-for comp in components:
- patches = fault.get_subfault_patches(0, datatype="seismic", component=comp)
+ starttime_sampling = 0.5 # noqa: F841
- for patch, starttime, duration, slip in zip(
- patches, starttimes, durations.ravel(), slips[comp]
- ):
- # stf = gf.HalfSinusoidSTF(anchor=-1., duration=float(duration))
- patch.stf.duration = float(duration)
- # stime = num.round(starttime / starttime_sampling) * starttime_sampling
- patch.update(slip=float(slip), time=event.time + float(starttime))
- # print(patch)
-
-# synthetics generation
-engine = gf.LocalEngine(store_superdirs=store_superdirs)
-
-patchidx = fault.patchmap(index=0, dipidx=nuc_dip_idx, strikeidx=nuc_strike_idx)
-
-targets = sc.wavemaps[0].targets
-filterer = sc.wavemaps[0].config.filterer
-ntargets = len(targets)
-
-gfs = ffi.load_gf_library(
- directory=project_dir + "/ffi/linear_gfs/", filename="seismic_uparr_any_P_0"
-)
-ats = gfs.reference_times - arrival_taper.b
-
-traces, tmins = heart.seis_synthetics(
- engine,
- patches,
- targets,
- arrival_times=ats,
- wavename="any_P",
- arrival_taper=arrival_taper,
- filterer=filterer,
- outmode="stacked_traces",
-)
-
-targetidxs = num.lib.index_tricks.s_[:]
-
-if False:
- # for station corrections maybe in the future?
- station_corrections = num.zeros(len(traces))
- starttimes = (
- num.tile(starttimes, ntargets) + num.repeat(station_corrections, fault.npatches)
- ).reshape(ntargets, fault.npatches)
- targetidxs = num.atleast_2d(num.arange(ntargets)).T
+ arrival_taper = heart.ArrivalTaper(a=-15.0, b=-10.0, c=50.0, d=55.0)
+
+ sc = problem.composites["seismic"]
+ fault = sc.load_fault_geometry()
+
+ # get number of patches in dip and strike direction
+ npdip, npstrike = fault.ordering.get_subfault_discretization(0)
+
+ # do fast sweeping to get rupture onset times for patches with respect to hypocenter
+ velocities = num.ones((npdip, npstrike)) * 3.5
-gfs.set_stack_mode("numpy")
-synthetics_nn = gfs.stack_all(
- targetidxs=targetidxs,
- starttimes=starttimes,
- durations=durations.ravel(),
- slips=slips[components[0]],
- interpolation="nearest_neighbor",
-)
-
-synthetics_ml = gfs.stack_all(
- targetidxs=targetidxs,
- starttimes=starttimes,
- durations=durations.ravel(),
- slips=slips[components[0]],
- interpolation="multilinear",
-)
-
-gfs.init_optimization()
-
-synthetics_nn_t = gfs.stack_all(
- targetidxs=targetidxs,
- starttimes=starttimes,
- durations=durations.ravel(),
- slips=slips[components[0]],
- interpolation="nearest_neighbor",
-).eval()
-
-synthetics_ml_t = gfs.stack_all(
- targetidxs=targetidxs,
- starttimes=starttimes,
- durations=durations.ravel(),
- slips=slips[components[0]],
- interpolation="multilinear",
-).eval()
-
-
-synth_traces_nn = []
-for i, target in enumerate(targets):
- tr = trace.Trace(
- ydata=synthetics_nn[i, :], tmin=gfs.reference_times[i], deltat=gfs.deltat
+ nuc_dip_idx, nuc_strike_idx = fault.fault_locations2idxs(
+ 0, nuc_dip, nuc_strike, backend="numpy"
)
- # print('trace tmin synthst', tr.tmin)
- tr.set_codes(*target.codes)
- tr.set_location("nn")
- synth_traces_nn.append(tr)
-
-synth_traces_ml = []
-for i, target in enumerate(targets):
- tr = trace.Trace(
- ydata=synthetics_ml[i, :], tmin=gfs.reference_times[i], deltat=gfs.deltat
+
+ starttimes = (
+ fault.get_subfault_starttimes(
+ 0, velocities, nuc_dip_idx, nuc_strike_idx
+ ).ravel()
+ + time_shift
+ )
+
+ # defining distributed slip values for slip parallel and perpendicular directions
+ uparr = num.ones((npdip, npstrike)) * 2.0
+ uperp = num.zeros((npdip, npstrike))
+ uperp[1:3, 3:7] = 1.0
+
+ # define rupture durations on each patch
+ durations = num.ones((npdip, npstrike)) * 0.5
+
+ slips = {
+ components[0]: uparr.ravel(),
+ # components[1]: uperp.ravel(),
+ "durations": durations.ravel(),
+ "velocities": velocities.ravel(),
+ }
+
+ # update patches with distributed slip and STF values
+ for comp in components:
+ patches = fault.get_subfault_patches(0, datatype="seismic", component=comp)
+
+ for patch, starttime, duration, slip in zip(
+ patches, starttimes, durations.ravel(), slips[comp]
+ ):
+ # stf = gf.HalfSinusoidSTF(anchor=-1., duration=float(duration))
+ patch.stf.duration = float(duration)
+ # stime = num.round(starttime / starttime_sampling) * starttime_sampling
+ patch.update(slip=float(slip), time=event.time + float(starttime))
+ # print(patch)
+
+ # synthetics generation
+ engine = gf.LocalEngine(store_superdirs=store_superdirs)
+ targets = sc.wavemaps[0].targets
+ filterer = sc.wavemaps[0].config.filterer
+ ntargets = len(targets)
+
+ gfs = ffi.load_gf_library(
+ directory=pjoin(project_dir, "ffi/linear_gfs/"),
+ filename="seismic_uparr_any_P_0",
)
- # print 'trace tmin synthst', tr.tmin
- tr.set_codes(*target.codes)
- tr.set_location("ml")
- synth_traces_ml.append(tr)
-
-synth_traces_nn_t = []
-for i, target in enumerate(targets):
- tr = trace.Trace(
- ydata=synthetics_nn_t[i, :], tmin=gfs.reference_times[i], deltat=gfs.deltat
+ ats = gfs.reference_times - arrival_taper.b
+
+ # seismosizer engine --> reference
+ ref_traces, _ = heart.seis_synthetics(
+ engine,
+ patches,
+ targets,
+ arrival_times=ats,
+ wavename="any_P",
+ arrival_taper=arrival_taper,
+ filterer=filterer,
+ outmode="stacked_traces",
)
- # print('trace tmin synthst', tr.tmin)
- tr.set_codes(*target.codes)
- tr.set_location("nn_t")
- synth_traces_nn_t.append(tr)
-
-synth_traces_ml_t = []
-for i, target in enumerate(targets):
- tr = trace.Trace(
- ydata=synthetics_ml_t[i, :], tmin=gfs.reference_times[i], deltat=gfs.deltat
+
+ targetidxs = num.atleast_2d(num.arange(ntargets)).T
+
+ if False:
+ # for station corrections maybe in the future?
+ station_corrections = num.zeros(len(ref_traces))
+ starttimes = (
+ num.tile(starttimes, ntargets)
+ + num.repeat(station_corrections, fault.npatches)
+ ).reshape(ntargets, fault.npatches)
+ targetidxs = num.atleast_2d(num.arange(ntargets)).T
+ elif True:
+ starttimes = num.tile(starttimes, ntargets).reshape((ntargets, uparr.size))
+
+ durations_dim2 = num.atleast_2d(durations.ravel())
+ patchidxs = num.arange(uparr.size, dtype="int")
+
+ # numpy stacking
+ gfs.set_stack_mode("numpy")
+ synthetics_nn = gfs.stack_all(
+ patchidxs=patchidxs,
+ targetidxs=targetidxs,
+ starttimes=starttimes[:, patchidxs],
+ durations=durations_dim2,
+ slips=slips[components[0]],
+ interpolation="nearest_neighbor",
)
- # print 'trace tmin synthst', tr.tmin
- tr.set_codes(*target.codes)
- tr.set_location("ml_t")
- synth_traces_ml_t.append(tr)
-
-# display to check
-trace.snuffle(
- traces + synth_traces_nn + synth_traces_ml + synth_traces_nn_t + synth_traces_ml_t,
- stations=sc.wavemaps[0].stations,
- events=[event],
-)
-
-traces1, tmins = heart.seis_synthetics(
- engine,
- [patches[0]],
- targets,
- arrival_times=ats,
- wavename="any_P",
- arrival_taper=arrival_taper,
- filterer=filterer,
- outmode="stacked_traces",
-)
-
-gfs.set_stack_mode("numpy")
-
-synth_traces_ml1 = []
-for i in range(1):
- synthetics_ml1 = gfs.stack_all(
+
+ synthetics_ml = gfs.stack_all(
+ patchidxs=patchidxs,
targetidxs=targetidxs,
- patchidxs=[i],
- starttimes=starttimes[0],
- durations=durations.ravel()[0],
- slips=num.atleast_1d(slips[components[0]][0]),
+ starttimes=starttimes[:, patchidxs],
+ durations=durations_dim2,
+ slips=slips[components[0]],
interpolation="multilinear",
)
- for i, target in enumerate(targets):
- tr = trace.Trace(
- ydata=synthetics_ml1[i, :], tmin=gfs.reference_times[i], deltat=gfs.deltat
+ # Pytensor stacking
+ gfs.init_optimization()
+
+ synthetics_nn_t = gfs.stack_all(
+ targetidxs=targetidxs,
+ starttimes=starttimes,
+ durations=durations_dim2,
+ slips=slips[components[0]],
+ interpolation="nearest_neighbor",
+ ).eval()
+
+ synthetics_ml_t = gfs.stack_all(
+ targetidxs=targetidxs,
+ starttimes=starttimes,
+ durations=durations_dim2,
+ slips=slips[components[0]],
+ interpolation="multilinear",
+ ).eval()
+
+ all_synth_traces = []
+ for test_synthetics, location_tag in zip(
+ [synthetics_nn, synthetics_ml, synthetics_nn_t, synthetics_ml_t],
+ ["nn", "ml", "nn_t", "ml_t"],
+ ):
+ test_traces = array_to_traces(
+ test_synthetics,
+ reference_times=gfs.reference_times,
+ deltat=gfs.deltat,
+ targets=targets,
+ location_tag=location_tag,
+ )
+
+ assert_traces(ref_traces, test_traces)
+ all_synth_traces.extend(test_traces)
+
+ if False:
+ # display to check
+ trace.snuffle(
+ ref_traces + all_synth_traces,
+ stations=sc.wavemaps[0].stations,
+ events=[event],
)
- print("trace tmin synthst", tr.tmin)
- # print(target.codes)
- tr.set_codes(*target.codes)
- tr.set_location("ml%i" % i)
- synth_traces_ml1.append(tr)
-
-trace.snuffle(
- traces1 + synth_traces_ml1, stations=sc.wavemaps[0].stations, events=[event]
-)
-
-# convert pyrocko traces to beat traces
-beat_traces = []
-for tr in traces:
- # print tr
- btrc = heart.SeismicDataset.from_pyrocko_trace(tr)
- seis_err_std = num.abs(btrc.ydata).max() * white_noise_perc_max
- noise = num.random.normal(0, seis_err_std, btrc.ydata.shape[0])
- btrc.ydata += noise
- btrc.set_location("0")
- beat_traces.append(btrc)
-
-# display to check noisy traces
-# trace.snuffle(beat_traces, stations=stations, events=[event])
-
-# save data to project folder
-seismic_outpath = os.path.join(project_dir, "seismic_data.pkl")
-# util.ensuredir(project_dir)
-# print 'saving synthetic data to: ', seismic_outpath
-# utility.dump_objects(seismic_outpath, outlist=[stations, beat_traces])
diff --git a/test/test_ffi_gfstacking_multifault.py b/test/test_ffi_gfstacking_multifault.py
index 696cdfaa..bd8098b2 100644
--- a/test/test_ffi_gfstacking_multifault.py
+++ b/test/test_ffi_gfstacking_multifault.py
@@ -1,13 +1,7 @@
-import os
-
import numpy as num
-from pyrocko import gf, model
-from pyrocko import moment_tensor as mt
-from pyrocko import orthodrome as otd
-from pyrocko import trace, util
+from pyrocko import gf, trace, util
-from beat import config, ffi, heart, inputf, models, utility
-from beat.sources import RectangularSource
+from beat import ffi, heart, models
from beat.utility import get_random_uniform
km = 1000.0
@@ -246,7 +240,7 @@ def to_pyrocko_traces(gfs, synthetics, targets, location=""):
engine,
[patches[0]],
targets,
- arrival_times=ats,
+ arrival_times=arrival_times,
wavename="any_P",
arrival_taper=arrival_taper,
filterer=filterer,
diff --git a/test/test_geodetic.py b/test/test_geodetic.py
index f513f127..e16ef22c 100644
--- a/test/test_geodetic.py
+++ b/test/test_geodetic.py
@@ -1,35 +1,33 @@
import logging
-import os
-import shutil
import unittest
from copy import deepcopy
-from tempfile import mkdtemp
+from pathlib import Path
import numpy as num
-import theano.tensor as tt
-from numpy.testing import assert_allclose
-from pyrocko import orthodrome, plot, trace, util
-from theano import config
+from pyrocko import util
+from pytensor import config as tconfig
+from pytest import mark
-from beat import heart, models
+from beat import models
-config.mode = "FAST_COMPILE"
+tconfig.mode = "FAST_COMPILE"
logger = logging.getLogger("test_geodetic")
km = 1000.0
-project_dir = "/home/vasyurhm/BEATS/LaquilaJointPonlyUPDATE_nf"
+# TODO update with version 2.0.0 compliant setup
+project_dir = Path("/home/vasyurhm/BEATS/LaquilaJointPonlyUPDATE_nf")
class TestGeodeticComposite(unittest.TestCase):
def setUp(self):
-
self.mode = "geometry"
+ if not project_dir.is_dir(): self.skipTest("Needs project dir")
self.problem = models.load_model(project_dir, self.mode)
+ @mark.skipif(project_dir.is_dir() is False, reason="Needs project dir")
def test_step(self):
-
step = self.problem.init_sampler()
rp = self.problem.get_random_point()
rp1 = deepcopy(rp)
diff --git a/test/test_heart.py b/test/test_heart.py
index b72f1a69..c2fb123d 100644
--- a/test/test_heart.py
+++ b/test/test_heart.py
@@ -1,7 +1,5 @@
import logging
-import os
import unittest
-from copy import deepcopy
from time import time
import numpy as num
@@ -64,14 +62,12 @@ def setUp(self):
self.m9 = symmat6(*self.m6)
def test_radiation(self):
-
wavenames = ["any_P", "any_SH", "any_SV"]
mt = MomentTensor.from_values(self.m6)
print(mt)
for wavename in wavenames:
-
t0 = time()
amps = radiation_matmul(
self.m9, self.takeoff_angles_rad, self.azimuths_rad, wavename=wavename
@@ -98,7 +94,6 @@ def test_radiation(self):
assert_allclose(amps, amps_weights, atol=1e-6, rtol=1e-6)
def test_polarity_bb(self):
-
from matplotlib import pyplot as plt
from beat.plotting import draw_ray_piercing_points_bb
@@ -141,7 +136,7 @@ def test_polarity_bb(self):
ax,
wavename=wavename,
best_mt=None,
- **kwargs
+ **kwargs,
)
draw_ray_piercing_points_bb(
ax,
diff --git a/test/test_interseismic.py b/test/test_interseismic.py
deleted file mode 100644
index 641d74df..00000000
--- a/test/test_interseismic.py
+++ /dev/null
@@ -1,234 +0,0 @@
-import logging
-import os
-import unittest
-
-import numpy as num
-from numpy.testing import assert_allclose
-from pyrocko import orthodrome, plot, util
-
-from beat import interseismic, pscmp
-from beat.heart import (
- ReferenceLocation,
- velocities_from_pole,
- velocities_from_strain_rate_tensor,
-)
-
-km = 1000.0
-
-logger = logging.getLogger("test_interseismic")
-
-
-class TestInterseismic(unittest.TestCase):
- def __init__(self, *args, **kwargs):
- unittest.TestCase.__init__(self, *args, **kwargs)
-
- self.reference = None
- self.amplitude = 0.02
- self.azimuth = 115.0
- self.locking_depth = [6.3, 5.0]
-
- def _get_store_superdir(self):
- return os.path.abspath("data/")
-
- def _get_gf_store(self, crust_ind):
- store_superdir = self._get_store_superdir(self)
- return os.path.join(store_superdir, "psgrn_green_%i" % crust_ind)
-
- def _get_synthetic_data(self):
- lon = num.linspace(10.5, 13.5, 100)
- lat = num.linspace(44.0, 46.0, 100)
-
- Lon, Lat = num.meshgrid(lon, lat)
- reference = ReferenceLocation(lon=5.0, lat=45.0)
-
- self.lons = Lon.flatten()
- self.lats = Lat.flatten()
- self.reference = reference
-
- def _get_sources(self, case=1):
- if case == 1:
- sources = [
- pscmp.PsCmpRectangularSource(
- lon=12.0, lat=45.0, strike=20.0, dip=90.0, length=125.0 * km
- ),
- pscmp.PsCmpRectangularSource(
- lon=11.25, lat=44.35, strike=70.0, dip=90.0, length=80.0 * km
- ),
- ]
-
- elif case == 2:
- sources = [
- pscmp.PsCmpRectangularSource(
- lon=12.04,
- lat=45.000,
- strike=329.35 - 180,
- dip=90.0,
- length=117809.04,
- ),
- pscmp.PsCmpRectangularSource(
- lon=11.5, lat=45.75, strike=357.04 - 180, dip=90.0, length=80210.56
- ),
- ]
-
- for source in sources:
- north_shift, east_shift = orthodrome.latlon_to_ne_numpy(
- self.reference.lat,
- self.reference.lon,
- source.effective_lat,
- source.effective_lon,
- )
- source.update(
- lat=self.reference.lat,
- lon=self.reference.lon,
- north_shift=north_shift,
- east_shift=east_shift,
- )
- print(source)
-
- return sources
-
- def old_test_backslip_params(self):
- azimuth = (90.0, 0.0)
- strike = (0.0, 0.0)
- dip = (90.0, 90.0)
- amplitude = (0.1, 0.1)
- locking_depth = (5000.0, 5000.0)
-
- test_opening = (-0.1, 0.0)
- test_slip = (0.0, 0.1)
- test_rake = (
- 180.0,
- 0.0,
- )
-
- for i, (a, s, d, am, ld) in enumerate(
- zip(azimuth, strike, dip, amplitude, locking_depth)
- ):
-
- d = interseismic.backslip_params(a, s, d, am, ld)
-
- num.testing.assert_allclose(
- d["opening"], test_opening[i], rtol=0.0, atol=1e-6
- )
- num.testing.assert_allclose(d["slip"], test_slip[i], rtol=0.0, atol=1e-6)
- num.testing.assert_allclose(d["rake"], test_rake[i], rtol=0.0, atol=1e-6)
-
- def old_test_block_geometry(self):
-
- if self.reference is None:
- self._get_synthetic_data()
-
- return interseismic.block_geometry(
- lons=self.lons,
- lats=self.lats,
- sources=self._get_sources(),
- reference=self.reference,
- )
-
- def old_test_block_synthetics(self):
-
- if self.reference is None:
- self._get_synthetic_data()
-
- return interseismic.geo_block_synthetics(
- lons=self.lons,
- lats=self.lats,
- sources=self._get_sources(),
- amplitude=self.amplitude,
- azimuth=self.azimuth,
- reference=self.reference,
- )
-
- def _test_backslip_synthetics(self, case=1):
-
- if self.reference is None:
- self._get_synthetic_data()
-
- return interseismic.geo_backslip_synthetics(
- store_superdir=self._get_store_superdir(),
- crust_ind=0,
- sources=self._get_sources(case),
- lons=self.lons,
- lats=self.lats,
- reference=self.reference,
- amplitude=self.amplitude,
- azimuth=self.azimuth,
- locking_depth=self.locking_depth,
- )
-
- def _old_test_plot_synthetics(self):
- from matplotlib import pyplot as plt
-
- fig, ax = plt.subplots(
- nrows=1, ncols=3, figsize=plot.mpl_papersize("a4", "portrait")
- )
-
- cmap = plt.cm.jet
- fontsize = 12
- sz = 10.0
-
- if self.reference is None:
- self._get_synthetic_data()
-
- # disp = self.test_block_geometry()
- # disp = self.test_block_synthetics()
- disp = self._test_backslip_synthetics(2)
-
- for i, comp in enumerate("NEZ"):
- im = ax[i].scatter(self.lons, self.lats, sz, disp[:, i], cmap=cmap)
- cblabel = "%s displacement [m]" % comp
- cbs = plt.colorbar(im, ax=ax[i], orientation="horizontal", cmap=cmap)
- cbs.set_label(cblabel, fontsize=fontsize)
-
- plt.show()
-
- def test_plate_rotation(self):
- v1_ref = num.array([-40.33431624537931, 27.59254158624030, 0.0]) / km
- v2_ref = num.array([35.47707891158412, -27.93047805570016, 0.0]) / km
- v1 = velocities_from_pole(37.0, -123.0, 48.7, -78.2, 0.78).ravel()
- from time import time
-
- t2 = time()
- v2 = velocities_from_pole(34.75, -116.5, 48.7, -78.2, -0.78).ravel()
- t3 = time()
- assert_allclose(v1, v1_ref, atol=1e-3, rtol=0.0)
- assert_allclose(v2, v2_ref, atol=1e-3, rtol=0.0)
-
- logger.info("One point %f" % (t3 - t2))
- t0 = time()
- v3 = velocities_from_pole([37.0, 37.1], [-123.0, -125.5], 48.7, -78.2, 0.78)
- t1 = time()
-
- logger.info("Two points %f" % (t1 - t0))
- assert v3.shape == (2, 3)
- assert_allclose(v3[0, :], v1_ref, atol=1e-3, rtol=0.0)
-
- def test_velocities_from_strain_rate_tensor(self):
-
- nanostrain = 1e-9
- lats_vec = num.linspace(37.0, 37.5, 5)
- lons_vec = num.linspace(-122.0, -121.0, 5)
- eps_xx = 0.0 # nanostrain South Bay Block from Jolivet et al. 2015
- eps_yy = 58 # - 115.
- eps_xy = 0 # - 58.
- rotation = 0 # 1009.5 # mm/ ( yr * km)
-
- lons, lats = num.meshgrid(lons_vec, lats_vec)
- print(lats, lons)
-
- v_x, v_y, v_z = velocities_from_strain_rate_tensor(
- lats.ravel(), lons.ravel(), eps_xx, eps_yy, eps_xy, rotation
- ).T
-
- print("vmagn", num.sqrt(v_x**2 + v_y**2))
- from matplotlib import pyplot as plt
-
- fig, axs = plt.subplots(1, 1)
- axs.quiver(lons, lats, v_x, v_y)
- plt.show()
- print(v_z)
-
-
-if __name__ == "__main__":
- util.setup_logging("test_utility", "info")
- unittest.main()
diff --git a/test/test_laplacian.py b/test/test_laplacian.py
index 0dd22470..b9b08d2a 100644
--- a/test/test_laplacian.py
+++ b/test/test_laplacian.py
@@ -1,6 +1,5 @@
import logging
import unittest
-from time import time
import numpy as num
from matplotlib import pyplot as plt
@@ -14,7 +13,6 @@
class LaplacianTest(unittest.TestCase):
def setUp(self):
-
self.x = num.arange(0, 5.0)
self.y = num.arange(-5, 2.0)
xs, ys = num.meshgrid(self.x, self.y)
@@ -22,7 +20,6 @@ def setUp(self):
print(self.coords.shape)
def test_distances(self):
-
dists = laplacian.distances(self.coords, self.coords)
plt.matshow(dists)
diff --git a/test/test_models.py b/test/test_models.py
index d2c4a102..c3e38c00 100644
--- a/test/test_models.py
+++ b/test/test_models.py
@@ -5,17 +5,22 @@
import numpy as num
import scipy
from numpy.testing import assert_allclose
-from pymc3 import Model
-from pymc3.distributions import MvNormal
from pyrocko import util
-from theano import function, shared
-from theano import sparse as ts
-from theano import tensor as tt
-from theano.printing import Print
+from pytensor import config as tconfig
+from pytensor import function, shared
+from pytensor import sparse as ts
+from pytensor import tensor as tt
+from pytensor.printing import Print
from beat.heart import Covariance, SeismicDataset
from beat.info import project_root
-from beat.models import log_2pi, multivariate_normal, multivariate_normal_chol
+from beat.models.distributions import (
+ log_2pi,
+ multivariate_normal,
+ multivariate_normal_chol,
+)
+
+tconfig.compute_test_value = "off"
logger = logging.getLogger("test_models")
@@ -43,10 +48,10 @@ def multivariate_normal_nohypers(datasets, weights, hyperparams, residuals):
datasets : list
of :class:`heart.SeismicDataset` or :class:`heart.GeodeticDataset`
weights : list
- of :class:`theano.shared`
+ of :class:`pytensor.shared`
Square matrix of the inverse of the covariance matrix as weights
hyperparams : dict
- of :class:`theano.`
+ of :class:`pytensor.`
residual : list or array of model residuals
Returns
@@ -57,12 +62,12 @@ def multivariate_normal_nohypers(datasets, weights, hyperparams, residuals):
logpts = tt.zeros((n_t), "float64")
- for l, data in enumerate(datasets):
+ for idx, data in enumerate(datasets):
M = tt.cast(shared(data.samples, name="nsamples", borrow=True), "int16")
- maha = residuals[l].dot(weights[l]).dot(residuals[l].T)
- slogpdet = Print("theano logpdet")(data.covariance.slog_pdet)
+ maha = residuals[idx].dot(weights[idx]).dot(residuals[idx].T)
+ slogpdet = Print("pytensor logpdet")(data.covariance.slog_pdet)
logpts = tt.set_subtensor(
- logpts[l : l + 1], (-0.5) * (M * log_2pi + slogpdet + maha)
+ logpts[idx : idx + 1], (-0.5) * (M * log_2pi + slogpdet + maha)
)
return logpts
@@ -72,7 +77,7 @@ def generate_toydata(n_datasets, n_samples):
datasets = []
synthetics = []
for d in range(n_datasets):
- a = num.atleast_2d(num.random.rand(n_samples))
+ # a = num.atleast_2d(num.random.rand(n_samples))
# C = a * a.T + num.eye(n_samples) * 0.001
C = num.eye(n_samples) * 0.001
kwargs = dict(
@@ -91,14 +96,14 @@ def make_weights(datasets, wtype, make_shared=False, sparse=False):
weights = []
for ds in datasets:
if wtype == "ichol":
- w = num.linalg.inv(ds.covariance.chol)
+ w = num.linalg.inv(ds.covariance.chol())
# print ds.covariance.chol_inverse
elif wtype == "icov_chol":
w = ds.covariance.chol_inverse
# print w
elif wtype == "icov":
- w = ds.covariance.inverse
+ w = ds.covariance.inverse()
else:
raise NotImplementedError("wtype not implemented!")
@@ -114,12 +119,6 @@ def make_weights(datasets, wtype, make_shared=False, sparse=False):
return weights
-def get_bulk_weights(weights):
- return tt.concatenate(
- [C.reshape((1, n_samples, n_samples)) for C in weights], axis=0
- )
-
-
class TestModels(unittest.TestCase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
@@ -136,11 +135,11 @@ def __init__(self, *args, **kwargs):
)
def test_scaling(self):
-
- maha1 = -(1.0 / 2 * self.scaling) * self.residuals[0, :].dot(
- self.datasets[0].covariance.inverse
- ).dot(self.residuals[0, :])
- cov_i_scaled = self.scaling * self.datasets[0].covariance.inverse
+ covi = self.datasets[0].covariance.inverse()
+ maha1 = -(1.0 / 2 * self.scaling) * self.residuals[0, :].dot(covi).dot(
+ self.residuals[0, :]
+ )
+ cov_i_scaled = self.scaling * covi
maha2 = -(1.0 / 2) * self.residuals[0, :].dot(cov_i_scaled).dot(
self.residuals[0, :]
)
@@ -148,7 +147,6 @@ def test_scaling(self):
assert_allclose(maha1, maha2, rtol=0.0, atol=1e-6)
def test_reference_llk_nohypers(self):
-
res = tt.matrix("residuals")
icov_weights_numpy = make_weights(self.datasets, "icov", False)
@@ -166,7 +164,7 @@ def test_reference_llk_nohypers(self):
logpdet = data.covariance.log_pdet
assert_allclose(logpdet, psd.log_pdet, rtol=0.0, atol=1e-6)
- assert_allclose(psd.pinv, data.covariance.inverse, rtol=0.0, atol=1e-6)
+ assert_allclose(psd.pinv, data.covariance.inverse(), rtol=0.0, atol=1e-6)
d[i] = normal_logpdf_cov(
data.ydata, self.synthetics[i], data.covariance.data
@@ -224,7 +222,6 @@ def test_mvn_cholesky(self):
assert_allclose(d, b, rtol=0.0, atol=1e-6)
def test_sparse(self):
-
res = tt.matrix("residuals")
ichol_weights = make_weights(self.datasets, "ichol", True, sparse=True)
@@ -261,70 +258,7 @@ def test_sparse(self):
assert_allclose(a, b, rtol=0.0, atol=1e-6)
- def test_bulk(self):
- def multivariate_normal_bulk_chol(
- bulk_weights, hps, slog_pdets, residuals, hp_specific=False
- ):
-
- M = residuals.shape[1]
- tmp = tt.batched_dot(bulk_weights, residuals)
- llk = tt.power(tmp, 2).sum(1)
- return (-0.5) * (
- slog_pdets
- + (M * (2 * hps + num.log(2 * num.pi)))
- + (1 / tt.exp(hps * 2)) * (llk)
- )
-
- res = tt.matrix("residuals")
- ichol_weights = make_weights(self.datasets, "ichol", True)
- icov_weights = make_weights(self.datasets, "icov", True)
- icov_chol_weights = make_weights(self.datasets, "icov_chol", True)
-
- ichol_bulk_weights = get_bulk_weights(ichol_weights)
- icov_chol_bulk_weights = get_bulk_weights(icov_chol_weights)
-
- slog_pdets = tt.concatenate(
- [data.covariance.slog_pdet.reshape((1,)) for data in self.datasets]
- )
-
- ichol_bulk_llk = multivariate_normal_bulk_chol(
- bulk_weights=ichol_bulk_weights,
- hps=self.hyperparams["h_any_P_T"],
- slog_pdets=slog_pdets,
- residuals=res,
- )
-
- icov_chol_bulk_llk = multivariate_normal_bulk_chol(
- bulk_weights=icov_chol_bulk_weights,
- hps=self.hyperparams["h_any_P_T"],
- slog_pdets=slog_pdets,
- residuals=res,
- )
-
- llk_normal = multivariate_normal(
- self.datasets, icov_weights, self.hyperparams, res
- )
-
- fnorm = function([res], llk_normal)
- f_bulk_ichol = function([res], ichol_bulk_llk)
- f_bulk_icov_chol = function([res], icov_chol_bulk_llk)
-
- t0 = time()
- a = f_bulk_ichol(self.residuals)
- t1 = time()
- b = f_bulk_icov_chol(self.residuals)
- t2 = time()
- c = fnorm(self.residuals)
-
- logger.info("Bulk Ichol %f [s]" % (t1 - t0))
- logger.info("Bulk Icov_chol %f [s]" % (t2 - t1))
-
- assert_allclose(a, c, rtol=0.0, atol=1e-6)
- assert_allclose(b, c, rtol=0.0, atol=1e-6)
- assert_allclose(a, b, rtol=0.0, atol=1e-6)
-
if __name__ == "__main__":
-
util.setup_logging("test_models", "info")
unittest.main()
diff --git a/test/test_paripool.py b/test/test_paripool.py
index 16affe18..71458684 100644
--- a/test/test_paripool.py
+++ b/test/test_paripool.py
@@ -5,7 +5,7 @@
import numpy as num
from pyrocko import util
-from beat import paripool
+from beat.parallel import paripool
logger = logging.getLogger("test_paripool")
@@ -22,9 +22,8 @@ def __init__(self, *args, **kwargs):
self.factors = num.array([0, 1, 2, 3, 2, 1, 0])
def test_pool(self):
-
featureClass = [[k, 1] for k in self.factors] # list of arguments
- p = paripool.paripool(add, featureClass, chunksize=2, nprocs=4, timeout=3)
+ p = paripool(add, featureClass, chunksize=2, nprocs=4, timeout=3)
ref_values = (self.factors + 1).tolist()
ref_values[3] = None
diff --git a/test/test_plotting.py b/test/test_plotting.py
index d303252c..fe7f6352 100644
--- a/test/test_plotting.py
+++ b/test/test_plotting.py
@@ -5,13 +5,12 @@
from matplotlib import pyplot as plt
from pyrocko import util
-from beat.models.distributions import vonmises_std
from beat.plotting import (
- format_axes,
draw_line_on_array,
+ format_axes,
+ hist2d_plot_op,
lune_plot,
spherical_kde_op,
- hist2d_plot_op,
)
logger = logging.getLogger("test_distributed")
@@ -24,7 +23,6 @@ def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
def rtest_draw_line_array(self):
-
amplitude = 10
x = num.arange(0, 100, 0.5)
y = amplitude * num.sin(x)
@@ -43,12 +41,11 @@ def rtest_draw_line_array(self):
plt.show()
def test_spherical_kde_op(self):
-
nsamples = 10
lats0 = num.rad2deg(num.random.normal(loc=0.0, scale=0.1, size=nsamples))
lons0 = num.rad2deg(num.random.normal(loc=-3.14, scale=0.3, size=nsamples))
- kde, lats, lons = spherical_kde_op(lats0, lons0, grid_size=(200, 200))
+ kde, lats, lons = spherical_kde_op(lats0, lons0, grid_size=(100, 100))
ax = plt.axes()
im = ax.matshow(kde, extent=(-180, 180, -90, 90), origin="lower")
@@ -57,8 +54,7 @@ def test_spherical_kde_op(self):
plt.show()
def test_lune_plot(self):
-
- nsamples = 2100
+ nsamples = 1100
# latitude
w = num.random.normal(loc=0.5, scale=0.1, size=nsamples)
w_bound = 3.0 * num.pi / 8.0
@@ -74,7 +70,6 @@ def test_lune_plot(self):
gmt.save("lune_test.pdf", resolution=300, size=10)
def test_hist2d_plot_op(self):
-
ndraws = 300
ones = num.ones((ndraws))
@@ -102,6 +97,5 @@ def test_hist2d_plot_op(self):
if __name__ == "__main__":
-
util.setup_logging("test_plotting", "info")
unittest.main()
diff --git a/test/test_proposals.py b/test/test_proposals.py
index 14550fde..6790c991 100644
--- a/test/test_proposals.py
+++ b/test/test_proposals.py
@@ -16,7 +16,6 @@ def __init__(self, *args, **kwargs):
self.draws = 10
def test_proposals(self):
-
for proposal in available_proposals():
if proposal in multivariate_proposals:
scale = num.eye(2) * 0.5
@@ -28,6 +27,5 @@ def test_proposals(self):
if __name__ == "__main__":
-
util.setup_logging("test_proposals", "info")
unittest.main()
diff --git a/test/test_pt.py b/test/test_pt.py
index f05be419..e854beaa 100644
--- a/test/test_pt.py
+++ b/test/test_pt.py
@@ -5,17 +5,20 @@
from tempfile import mkdtemp
import numpy as num
-import pymc3 as pm
-import theano.tensor as tt
+import pymc as pm
+import pytensor.tensor as tt
+from arviz import plot_trace
+from matplotlib import pyplot as plt
from pyrocko import util
from pyrocko.plot import mpl_papersize
+from pytensor import config as tconfig
-from beat.backend import SampleStage
+from beat.backend import SampleStage, multitrace_to_inference_data
from beat.config import sample_p_outname
from beat.sampler import metropolis, pt
-from beat.sampler.pt import SamplingHistory
from beat.utility import load_objects, mod_i
+tconfig.compute_test_value = "pdb"
logger = logging.getLogger("test_pt")
@@ -27,13 +30,13 @@ def __init__(self, *args, **kwargs):
logger.info("Test result in: \n %s" % self.test_folder_multi)
- self.n_chains = 8
- self.n_workers_posterior = 2
- self.n_samples = int(3e4)
+ self.n_chains = 4
+ self.n_workers_posterior = 1
+ self.n_samples = int(3e3)
self.tune_interval = 50
- self.beta_tune_interval = 3000
+ self.beta_tune_interval = 300
self.swap_interval = (10, 15)
- self.buffer_size = self.n_samples / 10.0
+ self.buffer_size = int(self.n_samples / 10)
self.burn = 0.5
self.thin = 1
@@ -72,11 +75,11 @@ def two_gaussians(x):
shape=n,
lower=-2.0 * num.ones_like(mu1),
upper=2.0 * num.ones_like(mu1),
- testval=-1.0 * num.ones_like(mu1),
+ initval=-1.0 * num.ones_like(mu1),
transform=None,
)
like = pm.Deterministic("tmp", two_gaussians(X))
- llk = pm.Potential("like", like)
+ llk = pm.Potential("like", like) # noqa: F841
with PT_test:
step = metropolis.Metropolis(
@@ -101,12 +104,15 @@ def two_gaussians(x):
keep_tmp=False,
)
+ print("Result folder:", test_folder)
stage_handler = SampleStage(test_folder)
- mtrace = stage_handler.load_multitrace(-1, varnames=PT_test.vars)
+ mtrace = stage_handler.load_multitrace(-1, varnames=PT_test.value_vars)
history = load_objects(
os.path.join(stage_handler.stage_path(-1), sample_p_outname)
)
+ print(mtrace)
+ idata = multitrace_to_inference_data(mtrace)
n_steps = self.n_samples
burn = self.burn
@@ -125,11 +131,8 @@ def burn_sample(x):
return num.vstack(xout)
- from matplotlib import pyplot as plt
- from pymc3 import traceplot
-
with PT_test:
- traceplot(mtrace, transform=burn_sample)
+ plot_trace(idata, transform=None)
fig, axes = plt.subplots(
nrows=1, ncols=2, figsize=mpl_papersize("a5", "portrait")
diff --git a/test/test_resolution_subsampling.py b/test/test_resolution_subsampling.py
index 06daa471..f3ca51dc 100644
--- a/test/test_resolution_subsampling.py
+++ b/test/test_resolution_subsampling.py
@@ -2,9 +2,9 @@
from os.path import join as pjoin
import numpy as num
-from pyrocko import model
+import pytest
+from pyrocko import model, util
from pyrocko import orthodrome as otd
-from pyrocko import util
from pyrocko.gf.seismosizer import LocalEngine
from scipy.io import loadmat
@@ -12,37 +12,29 @@
from beat.config import ResolutionDiscretizationConfig
from beat.ffi import discretize_sources, optimize_discretization
from beat.heart import DiffIFG, init_geodetic_targets
+from beat.plotting import source_geometry
from beat.sources import RectangularSource
util.setup_logging("R-based subsampling", "info")
-real = False # if use real data
-synth_data_dist = "half" # uniform / half
-
km = 1000.0
nworkers = 4
-n_pix = 635 # 545, 193, 635
-
-store_superdirs = "/home/vasyurhm/GF/Marmara"
-varnames = ["uparr"]
-
-event = model.Event(
- lat=40.896,
- lon=28.86,
- time=util.str_to_time("2019-10-12 00:00:00"),
- depth=15000.0,
- name="marm",
- magnitude=7.0,
-)
-testdata_path = pjoin(info.project_root, "data/test/InputData.mat")
-d = loadmat(testdata_path)
-source_params = d["pm"]
-
-## optimize discretization
# load data and setup
-if real:
+def get_test_real_metzger(
+ source_params,
+ event,
+ data_dist,
+):
+ testdata_path = pjoin(info.project_root, "data/test/InputData.mat")
+ d = loadmat(testdata_path)
+
+ if data_dist == "uniform":
+ n_pix = 635 # 545, 193, 635
+ elif data_dist == "half":
+ n_pix = 193
+
data_xloc = d["X"][0:n_pix]
x_shift = data_xloc.min()
data_xloc -= x_shift
@@ -109,16 +101,18 @@
extension_lengths=[0.0, 0.0, 0.0],
extension_widths=[0.0, 0.0, 0.0],
)
-else:
+ return config, sources, data_yloc, data_xloc, los
- if synth_data_dist == "uniform":
+
+def get_test_synth(source_params, event, data_dist="uniform"):
+ if data_dist == "uniform":
yvec = num.linspace(-15.0, 15.0, 30)
xvec = num.linspace(-20.0, 20.0, 40)
- elif synth_data_dist == "half":
+ elif data_dist == "half":
yvec = num.linspace(-15.0, 15.0, 30)
xvec = num.linspace(-20.0, -5.0, 20)
- y_shift = x_shift = 0.0
+ # y_shift = x_shift = 0.0
X, Y = num.meshgrid(xvec, yvec)
data_xloc = X.ravel()
data_yloc = Y.ravel()
@@ -151,12 +145,12 @@
Length, Width, Depth, Dip, Strike, Xloc, Yloc, strsl, dipsl, _ = source_params[
:, sps
]
- print(Xloc, Yloc)
+ # print(Xloc, Yloc)
lat, lon = otd.ne_to_latlon(event.lat, event.lon, 0.0 * km, 0.0 * km)
- rake = math.atan2(dipsl, strsl)
- print("d,s,r", dipsl, strsl, rake)
- slip = math.sqrt(strsl**2 + dipsl**2)
- print("lat,lon", lat, lon)
+ # rake = math.atan2(dipsl, strsl)
+
+ # slip = math.sqrt(strsl**2 + dipsl**2)
+
rf = RectangularSource(
lat=lat,
lon=lon,
@@ -173,60 +167,86 @@
print(rf)
sources.append(rf)
-lats, lons = otd.ne_to_latlon(event.lat, event.lon, data_yloc * km, data_xloc * km)
-
-datasets = [
- DiffIFG(
- east_shifts=num.zeros_like(data_yloc).ravel(),
- north_shifts=num.zeros_like(data_yloc).ravel(),
- odw=num.ones_like(data_yloc).ravel(),
- lats=lats.ravel(),
- lons=lons.ravel(),
- los_vector=los,
- displacement=num.zeros_like(data_yloc).ravel(),
+ return config, sources, data_yloc, data_xloc, los
+
+
+@pytest.mark.parametrize("data_dist", ["uniform", "half"])
+@pytest.mark.parametrize("data_source", [get_test_synth, get_test_real_metzger])
+def test_resolution_subsampling(data_source, data_dist):
+ store_superdirs = "/home/vasyurhm/GF/Marmara"
+ varnames = ["uparr"]
+
+ event = model.Event(
+ lat=40.896,
+ lon=28.86,
+ time=util.str_to_time("2019-10-12 00:00:00"),
+ depth=15000.0,
+ name="marm",
+ magnitude=7.0,
)
-]
-
-
-fault = discretize_sources(
- config, sources=sources, datatypes=["geodetic"], varnames=varnames, tolerance=0.5
-)
-
-
-engine = LocalEngine(store_superdirs=[store_superdirs])
-
-targets = init_geodetic_targets(
- datasets,
- earth_model_name="ak135-f-continental.m",
- interpolation="multilinear",
- crust_inds=[0],
- sample_rate=0.0,
-)
-
-print(event)
-opt_fault, R = optimize_discretization(
- config,
- fault,
- datasets=datasets,
- varnames=varnames,
- crust_ind=0,
- engine=engine,
- targets=targets,
- event=event,
- force=True,
- nworkers=nworkers,
- debug=False,
- method="laplacian",
-)
-from beat.plotting import source_geometry
+ testdata_path = pjoin(info.project_root, "data/test/InputData.mat")
+ d = loadmat(testdata_path)
+ source_params = d["pm"]
+
+ config, sources, data_yloc, data_xloc, los = data_source(
+ source_params, event, data_dist
+ )
+
+ lats, lons = otd.ne_to_latlon(event.lat, event.lon, data_yloc * km, data_xloc * km)
+
+ datasets = [
+ DiffIFG(
+ east_shifts=num.zeros_like(data_yloc).ravel(),
+ north_shifts=num.zeros_like(data_yloc).ravel(),
+ odw=num.ones_like(data_yloc).ravel(),
+ lats=lats.ravel(),
+ lons=lons.ravel(),
+ los_vector=los,
+ displacement=num.zeros_like(data_yloc).ravel(),
+ )
+ ]
+
+ fault = discretize_sources(
+ config,
+ sources=sources,
+ datatypes=["geodetic"],
+ varnames=varnames,
+ tolerance=0.5,
+ )
+
+ engine = LocalEngine(store_superdirs=[store_superdirs])
+
+ targets = init_geodetic_targets(
+ datasets,
+ event=event,
+ earth_model_name="ak135-f-continental.m",
+ interpolation="multilinear",
+ crust_inds=[0],
+ sample_rate=0.0,
+ )
-fig, ax = source_geometry(
- opt_fault,
- list(fault.iter_subfaults()),
- event=event,
- values=R,
- title="Resolution",
- datasets=datasets,
- show=True,
-)
+ opt_fault, R = optimize_discretization(
+ config,
+ fault,
+ datasets=datasets,
+ varnames=varnames,
+ crust_ind=0,
+ engine=engine,
+ targets=targets,
+ event=event,
+ force=True,
+ nworkers=nworkers,
+ debug=False,
+ method="laplacian",
+ )
+
+ fig, ax = source_geometry(
+ opt_fault,
+ list(fault.iter_subfaults()),
+ event=event,
+ values=R,
+ title="Resolution",
+ datasets=datasets,
+ show=True,
+ )
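The former script-style module above is now a proper pytest function; the two stacked `pytest.mark.parametrize` decorators run it for every combination of data source and data distribution (four cases in total). A small standalone sketch of that mechanism, with illustrative names only:

```python
import pytest


def make_synth(dist):
    return {"kind": "synth", "dist": dist}


def make_real(dist):
    return {"kind": "real", "dist": dist}


# Stacking parametrize decorators yields the cross product:
# (make_synth, "uniform"), (make_synth, "half"),
# (make_real, "uniform"), (make_real, "half")
@pytest.mark.parametrize("data_dist", ["uniform", "half"])
@pytest.mark.parametrize("data_source", [make_synth, make_real])
def test_cross_product(data_source, data_dist):
    case = data_source(data_dist)
    assert case["dist"] == data_dist
```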
diff --git a/test/test_sampler.py b/test/test_sampler.py
index 8ad5e15f..bbbd02bf 100644
--- a/test/test_sampler.py
+++ b/test/test_sampler.py
@@ -4,9 +4,10 @@
import matplotlib.pyplot as plt
import numpy as num
+from arviz import plot_density
from numpy import array
-from pymc3.plots import kdeplot
from pyrocko import util
+from pytest import mark
from beat.models import load_model
from beat.sampler import base
@@ -22,7 +23,6 @@
class SamplerTestCase(unittest.TestCase):
def __init__(self, *args, **kwargs):
-
self.plot = 1
unittest.TestCase.__init__(self, *args, **kwargs)
@@ -31,7 +31,6 @@ def __init__(self, *args, **kwargs):
self.mvcauchy = base.MultivariateCauchyProposal(num.array([[1.0]]))
def test_proposals(self):
-
nsamples = 100000
discard = 1000
@@ -48,12 +47,12 @@ def test_proposals(self):
if self.plot:
ax = plt.axes()
for d, color in zip([ndist, cdist, mvcdist], ["black", "blue", "red"]):
-
- ax = kdeplot(d, ax=ax, color=color)
+ ax = plot_density(num.atleast_2d(d).T, ax=ax, colors=color)
ax.set_xlim([-10.0, 10.0])
plt.show()
+ @mark.skip("Not sure what was tested here")
def test_smc_vs_pt(self):
problem_smc = load_model(smc_res_dir, "geometry", build=True)
problem_pt = load_model(pt_res_dir, "geometry", build=True)
@@ -64,7 +63,7 @@ def test_smc_vs_pt(self):
step_pt = problem_pt.init_sampler(False)
print("compiled pt")
- maxpoint = {
+ max_point = {
"depth": array([1.0]),
"dip": array([42.0]),
"duration": array([8.0]),
@@ -120,6 +119,7 @@ def test_smc_vs_pt(self):
point_pt, _ = step_pt.step(point)
print(point_pt, point_smc)
+ print(max_point)
if __name__ == "__main__":
diff --git a/test/test_smc.py b/test/test_smc.py
index 7c96d67c..b0f73ce3 100644
--- a/test/test_smc.py
+++ b/test/test_smc.py
@@ -1,19 +1,22 @@
import logging
import multiprocessing as mp
-import os
import shutil
import unittest
from tempfile import mkdtemp
import numpy as num
-import pymc3 as pm
-import theano.tensor as tt
+import pymc as pm
+import pytensor.tensor as tt
from pyrocko import util
+from pytensor import config as tconfig
-from beat import backend, smc, utility
+from beat import backend, utility
+from beat.sampler import smc
logger = logging.getLogger("test_smc")
+tconfig.compute_test_value = "pdb"
+
class TestSMC(unittest.TestCase):
def __init__(self, *args, **kwargs):
@@ -28,7 +31,7 @@ def __init__(self, *args, **kwargs):
)
self.n_cpu = mp.cpu_count()
- self.n_chains = 300
+ self.n_chains = 100
self.n_steps = 100
self.tune_interval = 25
@@ -70,11 +73,10 @@ def two_gaussians(x):
shape=n,
lower=-2.0 * num.ones_like(mu1),
upper=2.0 * num.ones_like(mu1),
- testval=-1.0 * num.ones_like(mu1),
+ initval=-1.0 * num.ones_like(mu1),
transform=None,
)
- like = pm.Deterministic("like", two_gaussians(X))
- llk = pm.Potential("like", like)
+ _ = pm.Deterministic("like", two_gaussians(X))
with SMC_test:
step = smc.SMC(
@@ -96,7 +98,7 @@ def two_gaussians(x):
stage_handler = backend.SampleStage(test_folder)
- mtrace = stage_handler.load_multitrace(-1, model=SMC_test)
+ mtrace = stage_handler.load_multitrace(-1, varnames=SMC_test.value_vars)
d = mtrace.get_values("X", combine=True, squeeze=True)
x = last_sample(d)
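Loading the multitrace with `SMC_test.value_vars` reflects another pymc v5 rename: the list of sampled value-space variables that pymc3 exposed as `model.vars` roughly corresponds to `model.value_vars` in pymc v5. A short illustration on a toy model (not BEAT code):

```python
import pymc as pm

with pm.Model() as model:
    pm.Normal("X", mu=0.0, sigma=1.0)

# pymc3: model.vars; pymc v5: model.value_vars
print([v.name for v in model.value_vars])  # e.g. ['X'] (value variable names)
print([rv.name for rv in model.free_RVs])  # the random variables themselves
```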
diff --git a/test/test_sources.py b/test/test_sources.py
index 633ddce9..e1669c18 100644
--- a/test/test_sources.py
+++ b/test/test_sources.py
@@ -1,10 +1,12 @@
import logging
import unittest
+from importlib.util import find_spec
import numpy as num
import pyrocko.moment_tensor as mtm
from numpy.testing import assert_allclose
from pyrocko import util
+from pytest import mark
from beat.sources import MTQTSource
@@ -17,7 +19,6 @@ def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
def test_MTSourceQT(self):
-
# from Tape & Tape 2015 Appendix A:
(u, v, kappa, sigma, h) = (
3.0 / 8.0 * pi,
@@ -58,10 +59,13 @@ def test_MTSourceQT(self):
print("M9 NEED", mt.m9)
print("M9 NWU", mt.m9_nwu)
+ @mark.skipif(
+ (find_spec("mtpar") is None), reason="Test needs 'mtpar' to be installed"
+ )
def test_vs_mtpar(self):
try:
import mtpar
- except (ImportError):
+ except ImportError:
logger.warning(
"This test needs mtpar to be installed: "
"https://github.com/rmodrak/mtpar/"
@@ -122,6 +126,5 @@ def test_vs_mtpar(self):
if __name__ == "__main__":
-
util.setup_logging("test_sources", "info")
unittest.main()
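The `mark.skipif(find_spec(...) is None, ...)` guard added above is a standard pytest pattern for optional dependencies: the check runs at collection time, so the test is skipped cleanly instead of failing on import. A generic sketch with a placeholder package name (not a real dependency of this repository):

```python
from importlib.util import find_spec

from pytest import mark


# "some_optional_pkg" is a placeholder for an optional dependency such as mtpar
@mark.skipif(
    find_spec("some_optional_pkg") is None,
    reason="Test needs 'some_optional_pkg' to be installed",
)
def test_needs_optional_package():
    import some_optional_pkg

    assert some_optional_pkg is not None
```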
diff --git a/test/test_utility.py b/test/test_utility.py
index 4a7ba0d8..2054dedf 100644
--- a/test/test_utility.py
+++ b/test/test_utility.py
@@ -1,10 +1,9 @@
import logging
import unittest
-from tempfile import mkdtemp
from time import time
import numpy as num
-import theano.tensor as tt
+import pytensor.tensor as tt
from pyrocko import util
from beat import utility
@@ -20,7 +19,6 @@ def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
def test_rotation(self):
-
self.R = utility.get_rotation_matrix(["x", "y"])
self.Rz = utility.get_rotation_matrix("z")
@@ -60,15 +58,16 @@ def test_list_ordering(self):
lpoint = [a, b, c]
lij = utility.ListToArrayBijection(lordering, lpoint)
- ref_point = {"a": a, "b": b, "c": c}
+ # ref_point = {"a": a, "b": b, "c": c}
array = lij.l2a(lpoint)
point = lij.l2d(lpoint)
- print("arr", array)
- print("point, ref_point", point, ref_point)
- print(lij.l2d(lij.a2l(array)))
- def test_window_rms(self):
+ point_from_array = lij.l2d(lij.a2l(array))
+ for k, val in point_from_array.items():
+ num.testing.assert_allclose(val, point[k])
+
+ def test_window_rms(self):
data = num.random.randn(5000)
ws = int(data.size / 5)
t0 = time()
diff --git a/test/test_voronoi.py b/test/test_voronoi.py
index 523e7d3b..bc124e09 100644
--- a/test/test_voronoi.py
+++ b/test/test_voronoi.py
@@ -17,7 +17,6 @@
def plot_voronoi_cell_discretization(
gfs_dip, gfs_strike, voro_dip, voro_strike, gf2voro_idxs
):
-
ax = plt.axes()
ax.plot(gfs_strike, gfs_dip, "xk")
@@ -62,7 +61,6 @@ def __init__(self, *args, **kwargs):
)
def test_voronoi_discretization(self):
-
t0 = time()
gf2voro_idxs_c = voronoi.get_voronoi_cell_indexes_c(
self.gf_points_dip,
@@ -99,7 +97,7 @@ def test_voronoi_discretization(self):
self.gf_points_strike,
self.voronoi_points_dip,
self.voronoi_points_strike,
- gf2voro_idxs,
+ gf2voro_idxs_numpy,
)